Merge tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 5 Aug 2014 00:32:24 +0000 (17:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 5 Aug 2014 00:32:24 +0000 (17:32 -0700)
Pull char / misc driver patches from Greg KH:
 "Here's the big driver misc / char pull request for 3.17-rc1.

  Lots of things in here, the thunderbolt support for Apple laptops,
  some other new drivers, testing fixes, and other good things.  All
  have been in linux-next for a long time"

* tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (119 commits)
  misc: bh1780: Introduce the use of devm_kzalloc
  Lattice ECP3 FPGA: Correct endianness
  drivers/misc/ti-st: Load firmware from ti-connectivity directory.
  dt-bindings: extcon: Add support for SM5502 MUIC device
  extcon: sm5502: Change internal hardware switch according to cable type
  extcon: sm5502: Detect cable state after completing platform booting
  extcon: sm5502: Add support new SM5502 extcon device driver
  extcon: arizona: Get MICVDD against extcon device
  extcon: Remove unnecessary OOM messages
  misc: vexpress: Fix sparse non static symbol warnings
  mei: drop unused hw dependent fw status functions
  misc: bh1770glc: Use managed functions
  pcmcia: remove DEFINE_PCI_DEVICE_TABLE usage
  misc: remove DEFINE_PCI_DEVICE_TABLE usage
  ipack: Replace DEFINE_PCI_DEVICE_TABLE macro use
  drivers/char/dsp56k.c: drop check for negativity of unsigned parameter
  mei: fix return value on disconnect timeout
  mei: don't schedule suspend in pm idle
  mei: start disconnect request timer consistently
  mei: reset client connection state on timeout
  ...

140 files changed:
Documentation/ABI/testing/sysfs-class-mei [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-genwqe
Documentation/devicetree/bindings/extcon/extcon-sm5502.txt [new file with mode: 0644]
Documentation/mic/mic_overview.txt
Documentation/mic/mpssd/mpss
Documentation/w1/slaves/w1_ds2406 [new file with mode: 0644]
MAINTAINERS
drivers/Kconfig
drivers/Makefile
drivers/char/bsr.c
drivers/char/dsp56k.c
drivers/char/i8k.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/mic_x100_dma.c [new file with mode: 0644]
drivers/dma/mic_x100_dma.h [new file with mode: 0644]
drivers/extcon/Kconfig
drivers/extcon/Makefile
drivers/extcon/extcon-adc-jack.c
drivers/extcon/extcon-arizona.c
drivers/extcon/extcon-class.c
drivers/extcon/extcon-gpio.c
drivers/extcon/extcon-max14577.c
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon-sm5502.c [new file with mode: 0644]
drivers/hv/channel.c
drivers/ipack/carriers/tpci200.c
drivers/ipack/devices/ipoctal.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/max77693-irq.c [deleted file]
drivers/mfd/max77693.c
drivers/misc/bh1770glc.c
drivers/misc/bh1780gli.c
drivers/misc/carma/carma-fpga.c
drivers/misc/dummy-irq.c
drivers/misc/genwqe/Kconfig
drivers/misc/genwqe/card_base.c
drivers/misc/genwqe/card_base.h
drivers/misc/genwqe/card_ddcb.c
drivers/misc/genwqe/card_debugfs.c
drivers/misc/genwqe/card_dev.c
drivers/misc/genwqe/card_sysfs.c
drivers/misc/genwqe/card_utils.c
drivers/misc/genwqe/genwqe_driver.h
drivers/misc/lattice-ecp3-config.c
drivers/misc/lkdtm.c
drivers/misc/mei/client.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-txe.c
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/misc/mic/Kconfig
drivers/misc/mic/Makefile
drivers/misc/mic/bus/Makefile [new file with mode: 0644]
drivers/misc/mic/bus/mic_bus.c [new file with mode: 0644]
drivers/misc/mic/card/mic_device.c
drivers/misc/mic/card/mic_device.h
drivers/misc/mic/card/mic_virtio.c
drivers/misc/mic/card/mic_x100.c
drivers/misc/mic/host/mic_boot.c
drivers/misc/mic/host/mic_device.h
drivers/misc/mic/host/mic_intr.c
drivers/misc/mic/host/mic_intr.h
drivers/misc/mic/host/mic_main.c
drivers/misc/mic/host/mic_virtio.c
drivers/misc/mic/host/mic_virtio.h
drivers/misc/mic/host/mic_x100.c
drivers/misc/ti-st/st_kim.c
drivers/misc/vexpress-syscfg.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/pci/pci-driver.c
drivers/pci/quirks.c
drivers/pcmcia/Kconfig
drivers/pcmcia/Makefile
drivers/pcmcia/bcm63xx_pcmcia.c
drivers/pcmcia/i82092.c
drivers/pcmcia/pd6729.c
drivers/pcmcia/sa1111_jornada720.c
drivers/pcmcia/vrc4173_cardu.c
drivers/pcmcia/yenta_socket.c
drivers/regulator/max77693.c
drivers/spmi/spmi.c
drivers/thunderbolt/Kconfig [new file with mode: 0644]
drivers/thunderbolt/Makefile [new file with mode: 0644]
drivers/thunderbolt/cap.c [new file with mode: 0644]
drivers/thunderbolt/ctl.c [new file with mode: 0644]
drivers/thunderbolt/ctl.h [new file with mode: 0644]
drivers/thunderbolt/eeprom.c [new file with mode: 0644]
drivers/thunderbolt/nhi.c [new file with mode: 0644]
drivers/thunderbolt/nhi.h [new file with mode: 0644]
drivers/thunderbolt/nhi_regs.h [new file with mode: 0644]
drivers/thunderbolt/path.c [new file with mode: 0644]
drivers/thunderbolt/switch.c [new file with mode: 0644]
drivers/thunderbolt/tb.c [new file with mode: 0644]
drivers/thunderbolt/tb.h [new file with mode: 0644]
drivers/thunderbolt/tb_regs.h [new file with mode: 0644]
drivers/thunderbolt/tunnel_pci.c [new file with mode: 0644]
drivers/thunderbolt/tunnel_pci.h [new file with mode: 0644]
drivers/uio/uio_pruss.c
drivers/vme/bridges/vme_ca91cx42.h
drivers/w1/masters/ds1wm.c
drivers/w1/masters/ds2482.c
drivers/w1/masters/ds2490.c
drivers/w1/masters/mxc_w1.c
drivers/w1/slaves/Kconfig
drivers/w1/slaves/Makefile
drivers/w1/slaves/w1_ds2406.c [new file with mode: 0644]
drivers/w1/slaves/w1_ds2760.c
drivers/w1/w1.c
drivers/w1/w1_family.c
drivers/w1/w1_family.h
drivers/w1/w1_int.c
drivers/w1/w1_log.h
drivers/w1/w1_netlink.c
include/asm-generic/vmlinux.lds.h
include/linux/extcon/sm5502.h [new file with mode: 0644]
include/linux/mfd/arizona/pdata.h
include/linux/mfd/max77693-private.h
include/linux/mic_bus.h [new file with mode: 0644]
include/linux/pci.h
include/uapi/linux/genwqe/genwqe_card.h
include/uapi/linux/i8k.h
tools/hv/hv_fcopy_daemon.c
tools/testing/selftests/Makefile
tools/testing/selftests/README.txt
tools/testing/selftests/cpu-hotplug/Makefile
tools/testing/selftests/cpu-hotplug/on-off-test.sh
tools/testing/selftests/kcmp/kcmp_test.c
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/memory-hotplug/on-off-test.sh
tools/testing/selftests/mqueue/Makefile
tools/testing/selftests/mqueue/mq_open_tests.c
tools/testing/selftests/mqueue/mq_perf_tests.c

diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
new file mode 100644 (file)
index 0000000..0ec8b81
--- /dev/null
@@ -0,0 +1,16 @@
+What:          /sys/class/mei/
+Date:          May 2014
+KernelVersion: 3.17
+Contact:       Tomas Winkler <tomas.winkler@intel.com>
+Description:
+               The mei/ class sub-directory belongs to the mei device class
+
+
+What:          /sys/class/mei/meiN/
+Date:          May 2014
+KernelVersion: 3.17
+Contact:       Tomas Winkler <tomas.winkler@intel.com>
+Description:
+               The /sys/class/mei/meiN directory is created for
+               each probed mei device
+
diff --git a/Documentation/ABI/testing/sysfs-driver-genwqe b/Documentation/ABI/testing/sysfs-driver-genwqe
index 1870737..64ac6d5 100644 (file)
@@ -25,6 +25,15 @@ Date:           Oct 2013
 Contact:        haver@linux.vnet.ibm.com
 Description:    Interface to set the next bitstream to be used.
 
+What:           /sys/class/genwqe/genwqe<n>_card/reload_bitstream
+Date:           May 2014
+Contact:        klebers@linux.vnet.ibm.com
+Description:    Interface to trigger a PCIe card reset to reload the bitstream.
+                  sudo sh -c 'echo 1 > \
+                    /sys/class/genwqe/genwqe0_card/reload_bitstream'
+                If successful, the card will come back with the bitstream set
+                on 'next_bitstream'.
+
 What:           /sys/class/genwqe/genwqe<n>_card/tempsens
 Date:           Oct 2013
 Contact:        haver@linux.vnet.ibm.com
diff --git a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
new file mode 100644 (file)
index 0000000..4ecda22
--- /dev/null
@@ -0,0 +1,23 @@
+
+* SM5502 MUIC (Micro-USB Interface Controller) device
+
+The Silicon Mitus SM5502 is a MUIC (Micro-USB Interface Controller) device
+which can detect the state of an external accessory when it is attached or
+detached and whether its button is pressed or released. It is interfaced to
+the host controller using an I2C interface.
+
+Required properties:
+- compatible: Should be "siliconmitus,sm5502-muic"
+- reg: Specifies the I2C slave address of the MUIC block. It should be 0x25
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the interrupts from sm5502 are delivered.
+- interrupts: Interrupt specifiers for detection interrupt sources.
+
+Example:
+
+       sm5502@25 {
+               compatible = "siliconmitus,sm5502-muic";
+               interrupt-parent = <&gpx1>;
+               interrupts = <5 0>;
+               reg = <0x25>;
+       };
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index b419292..77c5418 100644 (file)
@@ -17,35 +17,50 @@ for applications. A key benefit of our solution is that it leverages
 the standard virtio framework for network, disk and console devices,
 though in our case the virtio framework is used across a PCIe bus.
 
+The MIC PCIe card has a DMA controller with 8 channels, shared between the
+host s/w and the card s/w: channels 0 to 3 are used by the host and 4 to 7
+by the card. As the DMA device doesn't show up as a PCIe device, a virtual
+bus called mic bus is created and virtual DMA devices are created on it by
+the host/card drivers. On the host the channels are private and used only
+by the host driver to transfer data for the virtio devices.
+
 Here is a block diagram of the various components described above. The
 virtio backends are situated on the host rather than the card given better
 single threaded performance for the host compared to MIC, the ability of
 the host to initiate DMA's to/from the card using the MIC DMA engine and
 the fact that the virtio block storage backend can only be on the host.
 
-                              |
-       +----------+           |             +----------+
-       | Card OS  |           |             | Host OS  |
-       +----------+           |             +----------+
-                              |
-+-------+ +--------+ +------+ | +---------+  +--------+ +--------+
-| Virtio| |Virtio  | |Virtio| | |Virtio   |  |Virtio  | |Virtio  |
-| Net   | |Console | |Block | | |Net      |  |Console | |Block   |
-| Driver| |Driver  | |Driver| | |backend  |  |backend | |backend |
-+-------+ +--------+ +------+ | +---------+  +--------+ +--------+
-    |         |         |     |      |            |         |
-    |         |         |     |User  |            |         |
-    |         |         |     |------|------------|---------|-------
-    +-------------------+     |Kernel +--------------------------+
-              |               |       | Virtio over PCIe IOCTLs  |
-              |               |       +--------------------------+
-      +--------------+        |                   |
-      |Intel MIC     |        |            +---------------+
-      |Card Driver   |        |            |Intel MIC      |
-      +--------------+        |            |Host Driver    |
-              |               |            +---------------+
-              |               |                   |
-     +-------------------------------------------------------------+
-     |                                                             |
-     |                    PCIe Bus                                 |
-     +-------------------------------------------------------------+
+                                      |
+               +----------+           |             +----------+
+               | Card OS  |           |             | Host OS  |
+               +----------+           |             +----------+
+                                      |
+        +-------+ +--------+ +------+ | +---------+  +--------+ +--------+
+        | Virtio| |Virtio  | |Virtio| | |Virtio   |  |Virtio  | |Virtio  |
+        | Net   | |Console | |Block | | |Net      |  |Console | |Block   |
+        | Driver| |Driver  | |Driver| | |backend  |  |backend | |backend |
+        +-------+ +--------+ +------+ | +---------+  +--------+ +--------+
+            |         |         |     |      |            |         |
+            |         |         |     |User  |            |         |
+            |         |         |     |------|------------|---------|-------
+            +-------------------+     |Kernel +--------------------------+
+                      |               |       | Virtio over PCIe IOCTLs  |
+                      |               |       +--------------------------+
++-----------+         |               |                   |  +-----------+
+| MIC DMA   |         |               |                   |  | MIC DMA   |
+| Driver    |         |               |                   |  | Driver    |
++-----------+         |               |                   |  +-----------+
+      |               |               |                   |        |
++---------------+     |               |                   |  +----------------+
+|MIC virtual Bus|     |               |                   |  |MIC virtual Bus |
++---------------+     |               |                   |  +----------------+
+      |               |               |                   |              |
+      |   +--------------+            |            +---------------+     |
+      |   |Intel MIC     |            |            |Intel MIC      |     |
+      +---|Card Driver   |            |            |Host Driver    |     |
+          +--------------+            |            +---------------+-----+
+                      |               |                   |
+             +-------------------------------------------------------------+
+             |                                                             |
+             |                    PCIe Bus                                 |
+             +-------------------------------------------------------------+
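The overview above notes that the host-side DMA channels are private to the host driver and reached through the standard dmaengine API (the driver added later in this series marks them DMA_PRIVATE). A minimal consumer sketch, not part of this series: the function names and the filter's matching criterion are illustrative assumptions, and only generic dmaengine calls (dma_request_channel, dmaengine_submit, dma_async_issue_pending, dma_sync_wait, dma_release_channel) are relied on.

#include <linux/dmaengine.h>

static bool example_mic_dma_filter(struct dma_chan *chan, void *param)
{
	/* illustrative: match channels exposed by a given parent device */
	return chan->device->dev == (struct device *)param;
}

static int example_mic_dma_memcpy(struct device *dma_parent, dma_addr_t dst,
				  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* grabs one of the DMA_PRIVATE memcpy channels */
	chan = dma_request_channel(mask, example_mic_dma_filter, dma_parent);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		err = -ENOMEM;
		goto release;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		goto release;
	}

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		err = -EIO;
release:
	dma_release_channel(chan);
	return err;
}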
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index 3136c68..cacbdb0 100755 (executable)
@@ -48,18 +48,18 @@ start()
        fi
 
        echo -e $"Starting MPSS Stack"
-       echo -e $"Loading MIC_HOST Module"
+       echo -e $"Loading MIC_X100_DMA & MIC_HOST Modules"
 
-       # Ensure the driver is loaded
-       if [ ! -d "$sysfs" ]; then
-               modprobe mic_host
+       for f in "mic_host" "mic_x100_dma"
+       do
+               modprobe $f
                RETVAL=$?
                if [ $RETVAL -ne 0 ]; then
                        failure
                        echo
                        return $RETVAL
                fi
-       fi
+       done
 
        # Start the daemon
        echo -n $"Starting MPSSD "
@@ -170,8 +170,8 @@ unload()
        stop
 
        sleep 5
-       echo -n $"Removing MIC_HOST Module: "
-       modprobe -r mic_host
+       echo -n $"Removing MIC_HOST & MIC_X100_DMA Modules: "
+       modprobe -r mic_host mic_x100_dma
        RETVAL=$?
        [ $RETVAL -ne 0 ] && failure || success
        echo
diff --git a/Documentation/w1/slaves/w1_ds2406 b/Documentation/w1/slaves/w1_ds2406
new file mode 100644 (file)
index 0000000..8137fe6
--- /dev/null
@@ -0,0 +1,25 @@
+w1_ds2406 kernel driver
+=======================
+
+Supported chips:
+  * Maxim DS2406 (and other family 0x12) addressable switches
+
+Author: Scott Alfter <scott@alfter.us>
+
+Description
+-----------
+
+The w1_ds2406 driver allows connected devices to be switched on and off. 
+These chips also provide 128 bytes of OTP EPROM, but reading/writing it is
+not supported.  In TSOC-6 form, the DS2406 provides two switch outputs and
+can be provided with power on a dedicated input.  In TO-92 form, it provides
+one output and uses parasitic power only.
+
+The driver provides two sysfs files.  state is readable; it gives the
+current state of each switch, with PIO A in bit 0 and PIO B in bit 1.  The
+driver ORs this state with 0x30, so shell scripts get an ASCII 0/1/2/3 to
+work with.  output is writable; bits 0 and 1 control PIO A and B,
+respectively.  Bits 2-7 are ignored, so it's safe to write ASCII data.
+
+CRCs are checked on read and write.  Failed checks cause an I/O error to be
+returned.  On a failed write, the switch status is not changed.
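Since the state file returns an ASCII '0'..'3' (the raw switch bits ORed with 0x30), decoding it takes a one-character read. A minimal user-space sketch, not part of the patch; the device path is a hypothetical example of how a family-0x12 slave would appear under /sys/bus/w1/devices/.

#include <stdio.h>

int main(void)
{
	/* hypothetical path; real slaves are named <family>-<serial> */
	const char *state_path = "/sys/bus/w1/devices/12-0000000bbeef/state";
	FILE *f = fopen(state_path, "r");
	int c;

	if (!f) {
		perror("fopen");
		return 1;
	}
	c = fgetc(f);		/* ASCII '0'..'3', i.e. 0x30 | switch bits */
	fclose(f);
	if (c == EOF)
		return 1;

	printf("PIO A bit: %d, PIO B bit: %d\n", c & 0x1, (c >> 1) & 0x1);
	return 0;
}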
diff --git a/MAINTAINERS b/MAINTAINERS
index 1acc624..211389b 100644 (file)
@@ -7844,6 +7844,11 @@ S:       Maintained
 F:     include/linux/mmc/dw_mmc.h
 F:     drivers/mmc/host/dw_mmc*
 
+THUNDERBOLT DRIVER
+M:     Andreas Noever <andreas.noever@gmail.com>
+S:     Maintained
+F:     drivers/thunderbolt/
+
 TIMEKEEPING, CLOCKSOURCE CORE, NTP
 M:     John Stultz <john.stultz@linaro.org>
 M:     Thomas Gleixner <tglx@linutronix.de>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4e6e66c..622fa26 100644 (file)
@@ -178,4 +178,6 @@ source "drivers/mcb/Kconfig"
 
 source "drivers/ras/Kconfig"
 
+source "drivers/thunderbolt/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 65c32b1..54bfae1 100644 (file)
@@ -159,3 +159,4 @@ obj-$(CONFIG_FMC)           += fmc/
 obj-$(CONFIG_POWERCAP)         += powercap/
 obj-$(CONFIG_MCB)              += mcb/
 obj-$(CONFIG_RAS)              += ras/
+obj-$(CONFIG_THUNDERBOLT)      += thunderbolt/
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 8fedbc2..a6cef54 100644 (file)
@@ -259,7 +259,7 @@ static int bsr_add_node(struct device_node *bn)
                }
 
                cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
-                                               cur, cur->bsr_name);
+                                               cur, "%s", cur->bsr_name);
                if (IS_ERR(cur->bsr_device)) {
                        printk(KERN_ERR "device_create failed for %s\n",
                               cur->bsr_name);
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 01a5ca7..8bf70e8 100644 (file)
@@ -383,7 +383,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
                        return put_user(status, &hf->status);
                }
                case DSP56K_HOST_CMD:
-                       if (arg > 31 || arg < 0)
+                       if (arg > 31)
                                return -EINVAL;
                        mutex_lock(&dsp56k_mutex);
                        dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 93dcad0..65525c7 100644 (file)
@@ -65,6 +65,8 @@ static char bios_version[4];
 static struct device *i8k_hwmon_dev;
 static u32 i8k_hwmon_flags;
 static int i8k_fan_mult;
+static int i8k_pwm_mult;
+static int i8k_fan_max = I8K_FAN_HIGH;
 
 #define I8K_HWMON_HAVE_TEMP1   (1 << 0)
 #define I8K_HWMON_HAVE_TEMP2   (1 << 1)
@@ -97,6 +99,10 @@ static int fan_mult = I8K_FAN_MULT;
 module_param(fan_mult, int, 0);
 MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
 
+static int fan_max = I8K_FAN_HIGH;
+module_param(fan_max, int, 0);
+MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed");
+
 static int i8k_open_fs(struct inode *inode, struct file *file);
 static long i8k_ioctl(struct file *, unsigned int, unsigned long);
 
@@ -276,7 +282,7 @@ static int i8k_set_fan(int fan, int speed)
 {
        struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
 
-       speed = (speed < 0) ? 0 : ((speed > I8K_FAN_MAX) ? I8K_FAN_MAX : speed);
+       speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
        regs.ebx = (fan & 0xff) | (speed << 8);
 
        return i8k_smm(&regs) ? : i8k_get_fan_status(fan);
@@ -521,7 +527,7 @@ static ssize_t i8k_hwmon_show_pwm(struct device *dev,
        status = i8k_get_fan_status(index);
        if (status < 0)
                return -EIO;
-       return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255));
+       return sprintf(buf, "%d\n", clamp_val(status * i8k_pwm_mult, 0, 255));
 }
 
 static ssize_t i8k_hwmon_set_pwm(struct device *dev,
@@ -535,7 +541,7 @@ static ssize_t i8k_hwmon_set_pwm(struct device *dev,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
-       val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, i8k_pwm_mult), 0, i8k_fan_max);
 
        mutex_lock(&i8k_mutex);
        err = i8k_set_fan(index, val);
@@ -544,20 +550,6 @@ static ssize_t i8k_hwmon_set_pwm(struct device *dev,
        return err < 0 ? -EIO : count;
 }
 
-static ssize_t i8k_hwmon_show_label(struct device *dev,
-                                   struct device_attribute *devattr,
-                                   char *buf)
-{
-       static const char *labels[3] = {
-               "CPU",
-               "Left Fan",
-               "Right Fan",
-       };
-       int index = to_sensor_dev_attr(devattr)->index;
-
-       return sprintf(buf, "%s\n", labels[index]);
-}
-
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
@@ -570,41 +562,34 @@ static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
                          I8K_FAN_RIGHT);
 static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
                          i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
 
 static struct attribute *i8k_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,     /* 0 */
-       &sensor_dev_attr_temp1_label.dev_attr.attr,     /* 1 */
-       &sensor_dev_attr_temp2_input.dev_attr.attr,     /* 2 */
-       &sensor_dev_attr_temp3_input.dev_attr.attr,     /* 3 */
-       &sensor_dev_attr_temp4_input.dev_attr.attr,     /* 4 */
-       &sensor_dev_attr_fan1_input.dev_attr.attr,      /* 5 */
-       &sensor_dev_attr_pwm1.dev_attr.attr,            /* 6 */
-       &sensor_dev_attr_fan1_label.dev_attr.attr,      /* 7 */
-       &sensor_dev_attr_fan2_input.dev_attr.attr,      /* 8 */
-       &sensor_dev_attr_pwm2.dev_attr.attr,            /* 9 */
-       &sensor_dev_attr_fan2_label.dev_attr.attr,      /* 10 */
+       &sensor_dev_attr_temp2_input.dev_attr.attr,     /* 1 */
+       &sensor_dev_attr_temp3_input.dev_attr.attr,     /* 2 */
+       &sensor_dev_attr_temp4_input.dev_attr.attr,     /* 3 */
+       &sensor_dev_attr_fan1_input.dev_attr.attr,      /* 4 */
+       &sensor_dev_attr_pwm1.dev_attr.attr,            /* 5 */
+       &sensor_dev_attr_fan2_input.dev_attr.attr,      /* 6 */
+       &sensor_dev_attr_pwm2.dev_attr.attr,            /* 7 */
        NULL
 };
 
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
-       if ((index == 0 || index == 1) &&
-           !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+       if (index == 0 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
                return 0;
-       if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
+       if (index == 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
                return 0;
-       if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
+       if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
                return 0;
-       if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
+       if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
                return 0;
-       if (index >= 5 && index <= 7 &&
+       if (index >= 4 && index <= 5 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
                return 0;
-       if (index >= 8 && index <= 10 &&
+       if (index >= 6 && index <= 7 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
                return 0;
 
@@ -659,6 +644,37 @@ static int __init i8k_init_hwmon(void)
        return 0;
 }
 
+struct i8k_config_data {
+       int fan_mult;
+       int fan_max;
+};
+
+enum i8k_configs {
+       DELL_LATITUDE_D520,
+       DELL_PRECISION_490,
+       DELL_STUDIO,
+       DELL_XPS_M140,
+};
+
+static const struct i8k_config_data i8k_config_data[] = {
+       [DELL_LATITUDE_D520] = {
+               .fan_mult = 1,
+               .fan_max = I8K_FAN_TURBO,
+       },
+       [DELL_PRECISION_490] = {
+               .fan_mult = 1,
+               .fan_max = I8K_FAN_TURBO,
+       },
+       [DELL_STUDIO] = {
+               .fan_mult = 1,
+               .fan_max = I8K_FAN_HIGH,
+       },
+       [DELL_XPS_M140] = {
+               .fan_mult = 1,
+               .fan_max = I8K_FAN_HIGH,
+       },
+};
+
 static struct dmi_system_id i8k_dmi_table[] __initdata = {
        {
                .ident = "Dell Inspiron",
@@ -681,6 +697,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron"),
                },
        },
+       {
+               .ident = "Dell Latitude D520",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D520"),
+               },
+               .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
+       },
        {
                .ident = "Dell Latitude 2",
                .matches = {
@@ -702,6 +726,15 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "MP061"),
                },
        },
+       {
+               .ident = "Dell Precision 490",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME,
+                                 "Precision WorkStation 490"),
+               },
+               .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
+       },
        {
                .ident = "Dell Precision",
                .matches = {
@@ -729,7 +762,7 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
                },
-               .driver_data = (void *)1,       /* fan multiplier override */
+               .driver_data = (void *)&i8k_config_data[DELL_STUDIO],
        },
        {
                .ident = "Dell XPS M140",
@@ -737,7 +770,7 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
                },
-               .driver_data = (void *)1,       /* fan multiplier override */
+               .driver_data = (void *)&i8k_config_data[DELL_XPS_M140],
        },
        { }
 };
@@ -777,9 +810,17 @@ static int __init i8k_probe(void)
        }
 
        i8k_fan_mult = fan_mult;
+       i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
        id = dmi_first_match(i8k_dmi_table);
-       if (id && fan_mult == I8K_FAN_MULT && id->driver_data)
-               i8k_fan_mult = (unsigned long)id->driver_data;
+       if (id && id->driver_data) {
+               const struct i8k_config_data *conf = id->driver_data;
+
+               if (fan_mult == I8K_FAN_MULT && conf->fan_mult)
+                       i8k_fan_mult = conf->fan_mult;
+               if (fan_max == I8K_FAN_HIGH && conf->fan_max)
+                       i8k_fan_max = conf->fan_max;
+       }
+       i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
 
        return 0;
 }
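To make the new scaling concrete: with fan_max = I8K_FAN_TURBO the multiplier becomes DIV_ROUND_UP(255, 3) = 85, so fan states 0..3 map to pwm 0/85/170/255 and back, whereas the old hard-coded 128 could only express states 0..2. A small stand-alone sketch of that round trip, assuming I8K_FAN_HIGH == 2 and I8K_FAN_TURBO == 3 as in the updated uapi header.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(x, d)		(((x) + (d) / 2) / (d))
#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	int fan_max = 3;				/* assumed I8K_FAN_TURBO */
	int pwm_mult = DIV_ROUND_UP(255, fan_max);	/* 85 */
	int state, pwm;

	for (state = 0; state <= fan_max; state++) {
		/* what i8k_hwmon_show_pwm() would report for this state */
		pwm = CLAMP(state * pwm_mult, 0, 255);
		/* what i8k_hwmon_set_pwm() would turn that pwm back into */
		printf("state %d -> pwm %3d -> state %d\n", state, pwm,
		       CLAMP(DIV_ROUND_CLOSEST(pwm, pwm_mult), 0, fan_max));
	}
	return 0;
}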
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f6345f9..9b1a5ac 100644 (file)
@@ -661,6 +661,7 @@ static int hwicap_setup(struct device *dev, int id,
        drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
        if (!drvdata->base_address) {
                dev_err(dev, "ioremap() failed\n");
+               retval = -ENOMEM;
                goto failed2;
        }
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1eca7b9..8f6afbf 100644 (file)
@@ -33,6 +33,24 @@ if DMADEVICES
 
 comment "DMA Devices"
 
+config INTEL_MIC_X100_DMA
+       tristate "Intel MIC X100 DMA Driver"
+       depends on 64BIT && X86 && INTEL_MIC_BUS
+       select DMA_ENGINE
+       help
+         This enables DMA support for the Intel Many Integrated Core
+         (MIC) family of PCIe form factor coprocessor X100 devices that
+         run a 64 bit Linux OS. This driver will be used by both MIC
+         host and card drivers.
+
+         If you are building a host kernel with a MIC device or a card
+         kernel for a MIC device, then say M (recommended) or Y, else
+         say N. If unsure, say N.
+
+         More information about the Intel MIC family as well as the Linux
+         OS and tools for MIC to use with this driver are available from
+         <http://software.intel.com/en-us/mic-developer>.
+
 config INTEL_MID_DMAC
        tristate "Intel MID DMA support for Peripheral DMA controllers"
        depends on PCI && X86
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c779e1e..bd9e7fa 100644 (file)
@@ -47,3 +47,4 @@ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
new file mode 100644 (file)
index 0000000..6de2e67
--- /dev/null
@@ -0,0 +1,774 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC X100 DMA Driver.
+ *
+ * Adapted from IOAT dma driver.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+
+#include "mic_x100_dma.h"
+
+#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 -\
+                                      MIC_DMA_ALIGN_BYTES)
+#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
+#define MIC_DMA_DESC_TYPE_SHIFT        60
+#define MIC_DMA_MEMCPY_LEN_SHIFT 46
+#define MIC_DMA_STAT_INTR_SHIFT 59
+
+/* high-water mark for pushing dma descriptors */
+static int mic_dma_pending_level = 4;
+
+/* Status descriptor is used to write a 64 bit value to a memory location */
+enum mic_dma_desc_format_type {
+       MIC_DMA_MEMCPY = 1,
+       MIC_DMA_STATUS,
+};
+
+static inline u32 mic_dma_hw_ring_inc(u32 val)
+{
+       return (val + 1) % MIC_DMA_DESC_RX_SIZE;
+}
+
+static inline u32 mic_dma_hw_ring_dec(u32 val)
+{
+       return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
+}
+
+static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
+{
+       ch->head = mic_dma_hw_ring_inc(ch->head);
+}
+
+/* Prepare a memcpy desc */
+static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
+       dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
+{
+       u64 qw0, qw1;
+
+       qw0 = src_phys;
+       qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
+       qw1 = MIC_DMA_MEMCPY;
+       qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
+       qw1 |= dst_phys;
+       desc->qw0 = qw0;
+       desc->qw1 = qw1;
+}
+
+/* Prepare a status desc. with @data to be written at @dst_phys */
+static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
+       dma_addr_t dst_phys, bool generate_intr)
+{
+       u64 qw0, qw1;
+
+       qw0 = data;
+       qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
+       if (generate_intr)
+               qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
+       desc->qw0 = qw0;
+       desc->qw1 = qw1;
+}
+
+static void mic_dma_cleanup(struct mic_dma_chan *ch)
+{
+       struct dma_async_tx_descriptor *tx;
+       u32 tail;
+       u32 last_tail;
+
+       spin_lock(&ch->cleanup_lock);
+       tail = mic_dma_read_cmp_cnt(ch);
+       /*
+        * This is the barrier pair for smp_wmb() in fn.
+        * mic_dma_tx_submit_unlock. It's required so that we read the
+        * updated cookie value from tx->cookie.
+        */
+       smp_rmb();
+       for (last_tail = ch->last_tail; tail != last_tail;) {
+               tx = &ch->tx_array[last_tail];
+               if (tx->cookie) {
+                       dma_cookie_complete(tx);
+                       if (tx->callback) {
+                               tx->callback(tx->callback_param);
+                               tx->callback = NULL;
+                       }
+               }
+               last_tail = mic_dma_hw_ring_inc(last_tail);
+       }
+       /* finish all completion callbacks before incrementing tail */
+       smp_mb();
+       ch->last_tail = last_tail;
+       spin_unlock(&ch->cleanup_lock);
+}
+
+static u32 mic_dma_ring_count(u32 head, u32 tail)
+{
+       u32 count;
+
+       if (head >= tail)
+               count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
+       else
+               count = tail - head;
+       return count - 1;
+}
+
+/* Returns the num. of free descriptors on success, -ENOMEM on failure */
+static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
+{
+       struct device *dev = mic_dma_ch_to_device(ch);
+       u32 count;
+
+       count = mic_dma_ring_count(ch->head, ch->last_tail);
+       if (count < required) {
+               mic_dma_cleanup(ch);
+               count = mic_dma_ring_count(ch->head, ch->last_tail);
+       }
+
+       if (count < required) {
+               dev_dbg(dev, "Not enough desc space");
+               dev_dbg(dev, "%s %d required=%u, avail=%u\n",
+                       __func__, __LINE__, required, count);
+               return -ENOMEM;
+       } else {
+               return count;
+       }
+}
+
+/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
+static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
+                                   dma_addr_t dst, size_t len)
+{
+       size_t current_transfer_len;
+       size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
+       /* 3 is added to make sure we have enough space for status desc */
+       int num_desc = len / max_xfer_size + 3;
+       int ret;
+
+       if (len % max_xfer_size)
+               num_desc++;
+
+       ret = mic_dma_avail_desc_ring_space(ch, num_desc);
+       if (ret < 0)
+               return ret;
+       do {
+               current_transfer_len = min(len, max_xfer_size);
+               mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
+                                   src, dst, current_transfer_len);
+               mic_dma_hw_ring_inc_head(ch);
+               len -= current_transfer_len;
+               dst = dst + current_transfer_len;
+               src = src + current_transfer_len;
+       } while (len > 0);
+       return 0;
+}
+
+/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
+static void mic_dma_prog_intr(struct mic_dma_chan *ch)
+{
+       mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
+                                ch->status_dest_micpa, false);
+       mic_dma_hw_ring_inc_head(ch);
+       mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
+                                ch->status_dest_micpa, true);
+       mic_dma_hw_ring_inc_head(ch);
+}
+
+/* Wrapper function to program memcpy descriptors/status descriptors */
+static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
+                         dma_addr_t dst, size_t len)
+{
+       if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len))
+               return -ENOMEM;
+       /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
+       if (flags & DMA_PREP_FENCE) {
+               mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
+                                        ch->status_dest_micpa, false);
+               mic_dma_hw_ring_inc_head(ch);
+       }
+
+       if (flags & DMA_PREP_INTERRUPT)
+               mic_dma_prog_intr(ch);
+
+       return 0;
+}
+
+static inline void mic_dma_issue_pending(struct dma_chan *ch)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
+
+       spin_lock(&mic_ch->issue_lock);
+       /*
+        * Write to head triggers h/w to act on the descriptors.
+        * On MIC, writing the same head value twice causes
+        * a h/w error. On second write, h/w assumes we filled
+        * the entire ring & overwrote some of the descriptors.
+        */
+       if (mic_ch->issued == mic_ch->submitted)
+               goto out;
+       mic_ch->issued = mic_ch->submitted;
+       /*
+        * make descriptor updates visible before advancing head,
+        * this is purposefully not smp_wmb() since we are also
+        * publishing the descriptor updates to a dma device
+        */
+       wmb();
+       mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
+out:
+       spin_unlock(&mic_ch->issue_lock);
+}
+
+static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
+{
+       if (mic_dma_ring_count(ch->issued, ch->submitted)
+                       > mic_dma_pending_level)
+               mic_dma_issue_pending(&ch->api_ch);
+}
+
+static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
+       dma_cookie_t cookie;
+
+       dma_cookie_assign(tx);
+       cookie = tx->cookie;
+       /*
+        * We need an smp write barrier here because another CPU might see
+        * an update to submitted and update h/w head even before we
+        * assigned a cookie to this tx.
+        */
+       smp_wmb();
+       mic_ch->submitted = mic_ch->head;
+       spin_unlock(&mic_ch->prep_lock);
+       mic_dma_update_pending(mic_ch);
+       return cookie;
+}
+
+static inline struct dma_async_tx_descriptor *
+allocate_tx(struct mic_dma_chan *ch)
+{
+       u32 idx = mic_dma_hw_ring_dec(ch->head);
+       struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];
+
+       dma_async_tx_descriptor_init(tx, &ch->api_ch);
+       tx->tx_submit = mic_dma_tx_submit_unlock;
+       return tx;
+}
+
+/*
+ * Prepare a memcpy descriptor to be added to the ring.
+ * Note that the temporary descriptor adds an extra overhead of copying the
+ * descriptor to ring. So, we copy directly to the descriptor ring
+ */
+static struct dma_async_tx_descriptor *
+mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
+                        dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
+       struct device *dev = mic_dma_ch_to_device(mic_ch);
+       int result;
+
+       if (!len && !flags)
+               return NULL;
+
+       spin_lock(&mic_ch->prep_lock);
+       result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
+       if (result >= 0)
+               return allocate_tx(mic_ch);
+       dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+       spin_unlock(&mic_ch->prep_lock);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
+       int ret;
+
+       spin_lock(&mic_ch->prep_lock);
+       ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
+       if (!ret)
+               return allocate_tx(mic_ch);
+       spin_unlock(&mic_ch->prep_lock);
+       return NULL;
+}
+
+/* Return the status of the transaction */
+static enum dma_status
+mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
+                 struct dma_tx_state *txstate)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
+
+       if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
+               mic_dma_cleanup(mic_ch);
+
+       return dma_cookie_status(ch, cookie, txstate);
+}
+
+static irqreturn_t mic_dma_thread_fn(int irq, void *data)
+{
+       mic_dma_cleanup((struct mic_dma_chan *)data);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mic_dma_intr_handler(int irq, void *data)
+{
+       struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);
+
+       mic_dma_ack_interrupt(ch);
+       return IRQ_WAKE_THREAD;
+}
+
+static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
+{
+       u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
+       struct device *dev = &to_mbus_device(ch)->dev;
+
+       desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
+       ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);
+
+       if (!ch->desc_ring)
+               return -ENOMEM;
+
+       ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
+                                            desc_ring_size, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, ch->desc_ring_micpa))
+               goto map_error;
+
+       ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
+       if (!ch->tx_array)
+               goto tx_error;
+       return 0;
+tx_error:
+       dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
+                        DMA_BIDIRECTIONAL);
+map_error:
+       kfree(ch->desc_ring);
+       return -ENOMEM;
+}
+
+static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
+{
+       u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
+
+       vfree(ch->tx_array);
+       desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
+       dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
+                        desc_ring_size, DMA_BIDIRECTIONAL);
+       kfree(ch->desc_ring);
+       ch->desc_ring = NULL;
+}
+
+static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
+{
+       dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
+                        L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
+       kfree(ch->status_dest);
+}
+
+static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
+{
+       struct device *dev = &to_mbus_device(ch)->dev;
+
+       ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
+       if (!ch->status_dest)
+               return -ENOMEM;
+       ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
+                                       L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dev, ch->status_dest_micpa)) {
+               kfree(ch->status_dest);
+               ch->status_dest = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static int mic_dma_check_chan(struct mic_dma_chan *ch)
+{
+       if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
+           mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
+               mic_dma_disable_chan(ch);
+               mic_dma_chan_mask_intr(ch);
+               dev_err(mic_dma_ch_to_device(ch),
+                       "%s %d error setting up mic dma chan %d\n",
+                       __func__, __LINE__, ch->ch_num);
+               return -EBUSY;
+       }
+       return 0;
+}
+
+static int mic_dma_chan_setup(struct mic_dma_chan *ch)
+{
+       if (MIC_DMA_CHAN_MIC == ch->owner)
+               mic_dma_chan_set_owner(ch);
+       mic_dma_disable_chan(ch);
+       mic_dma_chan_mask_intr(ch);
+       mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
+       mic_dma_chan_set_desc_ring(ch);
+       ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
+       ch->head = ch->last_tail;
+       ch->issued = 0;
+       mic_dma_chan_unmask_intr(ch);
+       mic_dma_enable_chan(ch);
+       return mic_dma_check_chan(ch);
+}
+
+static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
+{
+       mic_dma_disable_chan(ch);
+       mic_dma_chan_mask_intr(ch);
+}
+
+static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
+{
+       dma_async_device_unregister(&mic_dma_dev->dma_dev);
+}
+
+static int mic_dma_setup_irq(struct mic_dma_chan *ch)
+{
+       ch->cookie =
+               to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
+                       mic_dma_intr_handler, mic_dma_thread_fn,
+                       "mic dma_channel", ch, ch->ch_num);
+       if (IS_ERR(ch->cookie))
+               return PTR_ERR(ch->cookie);
+       return 0;
+}
+
+static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
+{
+       to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
+}
+
+static int mic_dma_chan_init(struct mic_dma_chan *ch)
+{
+       int ret = mic_dma_alloc_desc_ring(ch);
+
+       if (ret)
+               goto ring_error;
+       ret = mic_dma_alloc_status_dest(ch);
+       if (ret)
+               goto status_error;
+       ret = mic_dma_chan_setup(ch);
+       if (ret)
+               goto chan_error;
+       return ret;
+chan_error:
+       mic_dma_free_status_dest(ch);
+status_error:
+       mic_dma_free_desc_ring(ch);
+ring_error:
+       return ret;
+}
+
+static int mic_dma_drain_chan(struct mic_dma_chan *ch)
+{
+       struct dma_async_tx_descriptor *tx;
+       int err = 0;
+       dma_cookie_t cookie;
+
+       tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
+       if (!tx) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       cookie = tx->tx_submit(tx);
+       if (dma_submit_error(cookie))
+               err = -ENOMEM;
+       else
+               err = dma_sync_wait(&ch->api_ch, cookie);
+       if (err) {
+               dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
+                       __func__, __LINE__, ch->ch_num);
+               err = -EIO;
+       }
+error:
+       mic_dma_cleanup(ch);
+       return err;
+}
+
+static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
+{
+       mic_dma_chan_destroy(ch);
+       mic_dma_cleanup(ch);
+       mic_dma_free_status_dest(ch);
+       mic_dma_free_desc_ring(ch);
+}
+
+static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
+                       enum mic_dma_chan_owner owner)
+{
+       int i, first_chan = mic_dma_dev->start_ch;
+       struct mic_dma_chan *ch;
+       int ret;
+
+       for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
+               unsigned long data;
+               ch = &mic_dma_dev->mic_ch[i];
+               data = (unsigned long)ch;
+               ch->ch_num = i;
+               ch->owner = owner;
+               spin_lock_init(&ch->cleanup_lock);
+               spin_lock_init(&ch->prep_lock);
+               spin_lock_init(&ch->issue_lock);
+               ret = mic_dma_setup_irq(ch);
+               if (ret)
+                       goto error;
+       }
+       return 0;
+error:
+       for (i = i - 1; i >= first_chan; i--)
+               mic_dma_free_irq(ch);
+       return ret;
+}
+
+static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
+{
+       int i, first_chan = mic_dma_dev->start_ch;
+       struct mic_dma_chan *ch;
+
+       for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
+               ch = &mic_dma_dev->mic_ch[i];
+               mic_dma_free_irq(ch);
+       }
+}
+
+static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
+{
+       int ret = mic_dma_chan_init(to_mic_dma_chan(ch));
+       if (ret)
+               return ret;
+       return MIC_DMA_DESC_RX_SIZE;
+}
+
+static void mic_dma_free_chan_resources(struct dma_chan *ch)
+{
+       struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
+       mic_dma_drain_chan(mic_ch);
+       mic_dma_chan_uninit(mic_ch);
+}
+
+/* Set the fn. handlers and register the dma device with dma api */
+static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
+                                      enum mic_dma_chan_owner owner)
+{
+       int i, first_chan = mic_dma_dev->start_ch;
+
+       dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
+       /*
+        * This dma engine is not capable of host memory to host memory
+        * transfers
+        */
+       dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);
+
+       if (MIC_DMA_CHAN_HOST == owner)
+               dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
+       mic_dma_dev->dma_dev.device_alloc_chan_resources =
+               mic_dma_alloc_chan_resources;
+       mic_dma_dev->dma_dev.device_free_chan_resources =
+               mic_dma_free_chan_resources;
+       mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
+       mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
+       mic_dma_dev->dma_dev.device_prep_dma_interrupt =
+               mic_dma_prep_interrupt_lock;
+       mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
+       mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
+       INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
+       for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
+               mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
+               dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
+               list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
+                             &mic_dma_dev->dma_dev.channels);
+       }
+       return dma_async_device_register(&mic_dma_dev->dma_dev);
+}
+
+/*
+ * Initializes dma channels and registers the dma device with the
+ * dma engine api.
+ */
+static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
+                                             enum mic_dma_chan_owner owner)
+{
+       struct mic_dma_device *mic_dma_dev;
+       int ret;
+       struct device *dev = &mbdev->dev;
+
+       mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+       if (!mic_dma_dev) {
+               ret = -ENOMEM;
+               goto alloc_error;
+       }
+       mic_dma_dev->mbdev = mbdev;
+       mic_dma_dev->dma_dev.dev = dev;
+       mic_dma_dev->mmio = mbdev->mmio_va;
+       if (MIC_DMA_CHAN_HOST == owner) {
+               mic_dma_dev->start_ch = 0;
+               mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
+       } else {
+               mic_dma_dev->start_ch = 4;
+               mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
+       }
+       ret = mic_dma_init(mic_dma_dev, owner);
+       if (ret)
+               goto init_error;
+       ret = mic_dma_register_dma_device(mic_dma_dev, owner);
+       if (ret)
+               goto reg_error;
+       return mic_dma_dev;
+reg_error:
+       mic_dma_uninit(mic_dma_dev);
+init_error:
+       kfree(mic_dma_dev);
+       mic_dma_dev = NULL;
+alloc_error:
+       dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
+       return mic_dma_dev;
+}
+
+static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
+{
+       mic_dma_unregister_dma_device(mic_dma_dev);
+       mic_dma_uninit(mic_dma_dev);
+       kfree(mic_dma_dev);
+}
+
+/* DEBUGFS CODE */
+static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
+{
+       struct mic_dma_device *mic_dma_dev = s->private;
+       int i, chan_num, first_chan = mic_dma_dev->start_ch;
+       struct mic_dma_chan *ch;
+
+       seq_printf(s, "SBOX_DCR: %#x\n",
+                  mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
+                                    MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
+       seq_puts(s, "DMA Channel Registers\n");
+       seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
+                  "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
+       seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
+       for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
+               ch = &mic_dma_dev->mic_ch[i];
+               chan_num = ch->ch_num;
+               seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
+                          chan_num,
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
+               seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
+                          mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
+       }
+       return 0;
+}
+
+static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mic_dma_reg_seq_show, inode->i_private);
+}
+
+static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
+{
+       return single_release(inode, file);
+}
+
+static const struct file_operations mic_dma_reg_ops = {
+       .owner   = THIS_MODULE,
+       .open    = mic_dma_reg_debug_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = mic_dma_reg_debug_release
+};
+
+/* Debugfs parent dir */
+static struct dentry *mic_dma_dbg;
+
+static int mic_dma_driver_probe(struct mbus_device *mbdev)
+{
+       struct mic_dma_device *mic_dma_dev;
+       enum mic_dma_chan_owner owner;
+
+       if (MBUS_DEV_DMA_MIC == mbdev->id.device)
+               owner = MIC_DMA_CHAN_MIC;
+       else
+               owner = MIC_DMA_CHAN_HOST;
+
+       mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
+       dev_set_drvdata(&mbdev->dev, mic_dma_dev);
+
+       if (mic_dma_dbg) {
+               mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
+                                                         mic_dma_dbg);
+               if (mic_dma_dev->dbg_dir)
+                       debugfs_create_file("mic_dma_reg", 0444,
+                                           mic_dma_dev->dbg_dir, mic_dma_dev,
+                                           &mic_dma_reg_ops);
+       }
+       return 0;
+}
+
+static void mic_dma_driver_remove(struct mbus_device *mbdev)
+{
+       struct mic_dma_device *mic_dma_dev;
+
+       mic_dma_dev = dev_get_drvdata(&mbdev->dev);
+       debugfs_remove_recursive(mic_dma_dev->dbg_dir);
+       mic_dma_dev_unreg(mic_dma_dev);
+}
+
+static struct mbus_device_id id_table[] = {
+       {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
+       {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
+       {0},
+};
+
+static struct mbus_driver mic_dma_driver = {
+       .driver.name =  KBUILD_MODNAME,
+       .driver.owner = THIS_MODULE,
+       .id_table = id_table,
+       .probe = mic_dma_driver_probe,
+       .remove = mic_dma_driver_remove,
+};
+
+static int __init mic_x100_dma_init(void)
+{
+       int rc = mbus_register_driver(&mic_dma_driver);
+       if (rc)
+               return rc;
+       mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+       return 0;
+}
+
+static void __exit mic_x100_dma_exit(void)
+{
+       debugfs_remove_recursive(mic_dma_dbg);
+       mbus_unregister_driver(&mic_dma_driver);
+}
+
+module_init(mic_x100_dma_init);
+module_exit(mic_x100_dma_exit);
+
+MODULE_DEVICE_TABLE(mbus, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
new file mode 100644 (file)
index 0000000..f663b0b
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC X100 DMA Driver.
+ *
+ * Adapted from IOAT dma driver.
+ */
+#ifndef _MIC_X100_DMA_H_
+#define _MIC_X100_DMA_H_
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mic_bus.h>
+
+#include "dmaengine.h"
+
+/*
+ * MIC has a total of 8 dma channels.
+ * Four channels are assigned for host SW use & the remaining for MIC SW.
+ * MIC DMA transfer size & addresses need to be 64 byte aligned.
+ */
+#define MIC_DMA_MAX_NUM_CHAN   8
+#define MIC_DMA_NUM_CHAN       4
+#define MIC_DMA_ALIGN_SHIFT    6
+#define MIC_DMA_ALIGN_BYTES    (1 << MIC_DMA_ALIGN_SHIFT)
+#define MIC_DMA_DESC_RX_SIZE   (128 * 1024 - 4)
+
+/*
+ * Register descriptions
+ * All the registers are 32 bit registers.
+ * DCR is a global register and all others are per-channel.
+ * DCR - bits 0, 2, 4, 6, 8, 10, 12, 14 - enable bits for channels 0 to 7
+ *      bits 1, 3, 5, 7, 9, 11, 13, 15 - owner bits for channels 0 to 7
+ * DCAR - bit 24 & 25 interrupt masks for mic owned & host owned channels
+ * DHPR - head of the descriptor ring updated by s/w
+ * DTPR - tail of the descriptor ring updated by h/w
+ * DRAR_LO - lower 32 bits of descriptor ring's mic address
+ * DRAR_HI - 3:0 - remaining 4 bits of descriptor ring's mic address
+ *          20:4 descriptor ring size
+ *          25:21 mic smpt entry number
+ * DSTAT - 16:0 h/w completion count; 31:28 dma engine status
+ * DCHERR - this register is non-zero on error
+ * DCHERRMSK - interrupt mask register
+ */
+#define MIC_DMA_HW_CMP_CNT_MASK                0x1ffff
+#define MIC_DMA_CHAN_QUIESCE           0x20000000
+#define MIC_DMA_SBOX_BASE              0x00010000
+#define MIC_DMA_SBOX_DCR               0x0000A280
+#define MIC_DMA_SBOX_CH_BASE           0x0001A000
+#define MIC_DMA_SBOX_CHAN_OFF          0x40
+#define MIC_DMA_SBOX_DCAR_IM0          (0x1 << 24)
+#define MIC_DMA_SBOX_DCAR_IM1          (0x1 << 25)
+#define MIC_DMA_SBOX_DRARHI_SYS_MASK   (0x1 << 26)
+#define MIC_DMA_REG_DCAR               0
+#define MIC_DMA_REG_DHPR               4
+#define MIC_DMA_REG_DTPR               8
+#define MIC_DMA_REG_DRAR_LO            20
+#define MIC_DMA_REG_DRAR_HI            24
+#define MIC_DMA_REG_DSTAT              32
+#define MIC_DMA_REG_DCHERR             44
+#define MIC_DMA_REG_DCHERRMSK          48
+
+/* HW dma desc */
+struct mic_dma_desc {
+       u64 qw0;
+       u64 qw1;
+};
+
+enum mic_dma_chan_owner {
+       MIC_DMA_CHAN_MIC = 0,
+       MIC_DMA_CHAN_HOST
+};
+
+/*
+ * mic_dma_chan - channel specific information
+ * @ch_num: channel number
+ * @owner: owner of this channel
+ * @last_tail: cached value of descriptor ring tail
+ * @head: index of next descriptor in desc_ring
+ * @issued: hardware notification point
+ * @submitted: index that will be used to submit descriptors to h/w
+ * @api_ch: dma engine api channel
+ * @desc_ring: dma descriptor ring
+ * @desc_ring_micpa: mic physical address of desc_ring
+ * @status_dest: destination for status (fence) descriptor
+ * @status_dest_micpa: mic address for status_dest,
+ *                    DMA controller uses this address
+ * @tx_array: array of async_tx
+ * @cleanup_lock: lock held when processing completed tx
+ * @prep_lock: lock held in prep_memcpy & released in tx_submit
+ * @issue_lock: lock used to synchronize writes to head
+ * @cookie: mic_irq cookie used with mic irq request
+ */
+struct mic_dma_chan {
+       int ch_num;
+       enum mic_dma_chan_owner owner;
+       u32 last_tail;
+       u32 head;
+       u32 issued;
+       u32 submitted;
+       struct dma_chan api_ch;
+       struct mic_dma_desc *desc_ring;
+       dma_addr_t desc_ring_micpa;
+       u64 *status_dest;
+       dma_addr_t status_dest_micpa;
+       struct dma_async_tx_descriptor *tx_array;
+       spinlock_t cleanup_lock;
+       spinlock_t prep_lock;
+       spinlock_t issue_lock;
+       struct mic_irq *cookie;
+};
+
+/*
+ * struct mic_dma_device - per mic device
+ * @mic_ch: dma channels
+ * @dma_dev: underlying dma device
+ * @mbdev: mic bus dma device
+ * @mmio: virtual address of the mmio space
+ * @dbg_dir: debugfs directory
+ * @start_ch: first channel number that can be used
+ * @max_xfer_size: maximum transfer size per dma descriptor
+ */
+struct mic_dma_device {
+       struct mic_dma_chan mic_ch[MIC_DMA_MAX_NUM_CHAN];
+       struct dma_device dma_dev;
+       struct mbus_device *mbdev;
+       void __iomem *mmio;
+       struct dentry *dbg_dir;
+       int start_ch;
+       size_t max_xfer_size;
+};
+
+static inline struct mic_dma_chan *to_mic_dma_chan(struct dma_chan *ch)
+{
+       return container_of(ch, struct mic_dma_chan, api_ch);
+}
+
+static inline struct mic_dma_device *to_mic_dma_dev(struct mic_dma_chan *ch)
+{
+       return
+       container_of((const typeof(((struct mic_dma_device *)0)->mic_ch)*)
+                    (ch - ch->ch_num), struct mic_dma_device, mic_ch);
+}
+
+static inline struct mbus_device *to_mbus_device(struct mic_dma_chan *ch)
+{
+       return to_mic_dma_dev(ch)->mbdev;
+}
+
+static inline struct mbus_hw_ops *to_mbus_hw_ops(struct mic_dma_chan *ch)
+{
+       return to_mbus_device(ch)->hw_ops;
+}
+
+static inline struct device *mic_dma_ch_to_device(struct mic_dma_chan *ch)
+{
+       return to_mic_dma_dev(ch)->dma_dev.dev;
+}
+
+static inline void __iomem *mic_dma_chan_to_mmio(struct mic_dma_chan *ch)
+{
+       return to_mic_dma_dev(ch)->mmio;
+}
+
+static inline u32 mic_dma_read_reg(struct mic_dma_chan *ch, u32 reg)
+{
+       return ioread32(mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
+                       ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
+}
+
+static inline void mic_dma_write_reg(struct mic_dma_chan *ch, u32 reg, u32 val)
+{
+       iowrite32(val, mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
+                 ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
+}
+
+static inline u32 mic_dma_mmio_read(struct mic_dma_chan *ch, u32 offset)
+{
+       return ioread32(mic_dma_chan_to_mmio(ch) + offset);
+}
+
+static inline void mic_dma_mmio_write(struct mic_dma_chan *ch, u32 val,
+                                     u32 offset)
+{
+       iowrite32(val, mic_dma_chan_to_mmio(ch) + offset);
+}
+
+static inline u32 mic_dma_read_cmp_cnt(struct mic_dma_chan *ch)
+{
+       return mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) &
+              MIC_DMA_HW_CMP_CNT_MASK;
+}
+
+static inline void mic_dma_chan_set_owner(struct mic_dma_chan *ch)
+{
+       u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+       u32 chan_num = ch->ch_num;
+
+       dcr = (dcr & ~(0x1 << (chan_num * 2))) | (ch->owner << (chan_num * 2));
+       mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+}
+
+static inline void mic_dma_enable_chan(struct mic_dma_chan *ch)
+{
+       u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+
+       dcr |= 2 << (ch->ch_num << 1);
+       mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+}
+
+static inline void mic_dma_disable_chan(struct mic_dma_chan *ch)
+{
+       u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+
+       dcr &= ~(2 << (ch->ch_num << 1));
+       mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
+}
+
+static void mic_dma_chan_set_desc_ring(struct mic_dma_chan *ch)
+{
+       u32 drar_hi;
+       dma_addr_t desc_ring_micpa = ch->desc_ring_micpa;
+
+       drar_hi = (MIC_DMA_DESC_RX_SIZE & 0x1ffff) << 4;
+       if (MIC_DMA_CHAN_MIC == ch->owner) {
+               drar_hi |= (desc_ring_micpa >> 32) & 0xf;
+       } else {
+               drar_hi |= MIC_DMA_SBOX_DRARHI_SYS_MASK;
+               drar_hi |= ((desc_ring_micpa >> 34)
+                           & 0x1f) << 21;
+               drar_hi |= (desc_ring_micpa >> 32) & 0x3;
+       }
+       mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_LO, (u32) desc_ring_micpa);
+       mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_HI, drar_hi);
+}
+
+static inline void mic_dma_chan_mask_intr(struct mic_dma_chan *ch)
+{
+       u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
+
+       if (MIC_DMA_CHAN_MIC == ch->owner)
+               dcar |= MIC_DMA_SBOX_DCAR_IM0;
+       else
+               dcar |= MIC_DMA_SBOX_DCAR_IM1;
+       mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
+}
+
+static inline void mic_dma_chan_unmask_intr(struct mic_dma_chan *ch)
+{
+       u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
+
+       if (MIC_DMA_CHAN_MIC == ch->owner)
+               dcar &= ~MIC_DMA_SBOX_DCAR_IM0;
+       else
+               dcar &= ~MIC_DMA_SBOX_DCAR_IM1;
+       mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
+}
+
+static void mic_dma_ack_interrupt(struct mic_dma_chan *ch)
+{
+       if (MIC_DMA_CHAN_MIC == ch->owner) {
+               /* HW errata */
+               mic_dma_chan_mask_intr(ch);
+               mic_dma_chan_unmask_intr(ch);
+       }
+       to_mbus_hw_ops(ch)->ack_interrupt(to_mbus_device(ch), ch->ch_num);
+}
+#endif
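
The DRAR_LO/DRAR_HI packing described in the register comment above is easiest to see with concrete numbers. The following is a small userspace sketch, not part of the patch, that mirrors the MIC-owned branch of mic_dma_chan_set_desc_ring(); the ring address is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define MIC_DMA_DESC_RX_SIZE	(128 * 1024 - 4)

int main(void)
{
	/* hypothetical 36-bit MIC physical address of the descriptor ring */
	uint64_t micpa = 0x2c0001000ULL;

	uint32_t drar_lo = (uint32_t)micpa;                        /* low 32 address bits  */
	uint32_t drar_hi = (MIC_DMA_DESC_RX_SIZE & 0x1ffff) << 4;  /* ring size, bits 20:4 */

	drar_hi |= (micpa >> 32) & 0xf;                            /* address bits 35:32   */

	printf("DRAR_LO = 0x%08x  DRAR_HI = 0x%08x\n", drar_lo, drar_hi);
	return 0;
}

For a host-owned channel the function above additionally sets the SYS mask bit and places the SMPT entry number in bits 25:21, as spelled out in the register comment.
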
index aebde48..6f2f472 100644 (file)
@@ -14,6 +14,20 @@ if EXTCON
 
 comment "Extcon Device Drivers"
 
+config EXTCON_ADC_JACK
+       tristate "ADC Jack extcon support"
+       depends on IIO
+       help
+         Say Y here to enable extcon device driver based on ADC values.
+
+config EXTCON_ARIZONA
+       tristate "Wolfson Arizona EXTCON support"
+       depends on MFD_ARIZONA && INPUT && SND_SOC
+       help
+         Say Y here to enable support for external accessory detection
+         with Wolfson Arizona devices. These are audio CODECs with
+         advanced audio accessory detection support.
+
 config EXTCON_GPIO
        tristate "GPIO extcon support"
        depends on GPIOLIB
@@ -21,12 +35,6 @@ config EXTCON_GPIO
          Say Y here to enable GPIO based extcon support. Note that GPIO
          extcon supports single state per extcon instance.
 
-config EXTCON_ADC_JACK
-       tristate "ADC Jack extcon support"
-       depends on IIO
-       help
-         Say Y here to enable extcon device driver based on ADC values.
-
 config EXTCON_MAX14577
        tristate "MAX14577/77836 EXTCON Support"
        depends on MFD_MAX14577
@@ -55,14 +63,6 @@ config EXTCON_MAX8997
          Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
          detector and switch.
 
-config EXTCON_ARIZONA
-       tristate "Wolfson Arizona EXTCON support"
-       depends on MFD_ARIZONA && INPUT && SND_SOC
-       help
-         Say Y here to enable support for external accessory detection
-         with Wolfson Arizona devices. These are audio CODECs with
-         advanced audio accessory detection support.
-
 config EXTCON_PALMAS
        tristate "Palmas USB EXTCON support"
        depends on MFD_PALMAS
@@ -70,4 +70,14 @@ config EXTCON_PALMAS
          Say Y here to enable support for USB peripheral and USB host
          detection by palmas usb.
 
+config EXTCON_SM5502
+       tristate "SM5502 EXTCON support"
+       select IRQ_DOMAIN
+       select REGMAP_I2C
+       select REGMAP_IRQ
+       help
+         If you say yes here you get support for the MUIC device of
+         Silicon Mitus SM5502. The SM5502 is a USB port accessory
+         detector and switch.
+
 endif # MULTISTATE_SWITCH
index bf7861e..b38546e 100644 (file)
@@ -1,12 +1,13 @@
-#
+
 # Makefile for external connector class (extcon) devices
 #
 
 obj-$(CONFIG_EXTCON)           += extcon-class.o
-obj-$(CONFIG_EXTCON_GPIO)      += extcon-gpio.o
 obj-$(CONFIG_EXTCON_ADC_JACK)  += extcon-adc-jack.o
+obj-$(CONFIG_EXTCON_ARIZONA)   += extcon-arizona.o
+obj-$(CONFIG_EXTCON_GPIO)      += extcon-gpio.o
 obj-$(CONFIG_EXTCON_MAX14577)  += extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX77693)  += extcon-max77693.o
 obj-$(CONFIG_EXTCON_MAX8997)   += extcon-max8997.o
-obj-$(CONFIG_EXTCON_ARIZONA)   += extcon-arizona.o
 obj-$(CONFIG_EXTCON_PALMAS)    += extcon-palmas.o
+obj-$(CONFIG_EXTCON_SM5502)    += extcon-sm5502.o
index e18f95b..d860229 100644 (file)
@@ -112,7 +112,6 @@ static int adc_jack_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to allocate extcon device\n");
                return -ENOMEM;
        }
-       data->edev->dev.parent = &pdev->dev;
        data->edev->name = pdata->name;
 
        /* Check the length of array and set num_cables */
index 6c84e3d..ba51588 100644 (file)
 #define ARIZONA_ACCDET_MODE_HPL 1
 #define ARIZONA_ACCDET_MODE_HPR 2
 
+#define ARIZONA_MICD_CLAMP_MODE_JDL      0x4
+#define ARIZONA_MICD_CLAMP_MODE_JDH      0x5
+#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9
+#define ARIZONA_MICD_CLAMP_MODE_JDH_GP5H 0xb
+
 #define ARIZONA_HPDET_MAX 10000
 
 #define HPDET_DEBOUNCE 500
@@ -324,14 +329,17 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
 }
 
 static struct {
+       unsigned int threshold;
        unsigned int factor_a;
        unsigned int factor_b;
 } arizona_hpdet_b_ranges[] = {
-       {  5528,   362464 },
-       { 11084,  6186851 },
-       { 11065, 65460395 },
+       { 100,  5528,   362464 },
+       { 169, 11084,  6186851 },
+       { 169, 11065, 65460395 },
 };
 
+#define ARIZONA_HPDET_B_RANGE_MAX 0x3fb
+
 static struct {
        int min;
        int max;
@@ -386,7 +394,8 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
                           >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
 
                if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
-                   (val < 100 || val >= 0x3fb)) {
+                   (val < arizona_hpdet_b_ranges[range].threshold ||
+                    val >= ARIZONA_HPDET_B_RANGE_MAX)) {
                        range++;
                        dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
                                range);
@@ -399,7 +408,8 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
                }
 
                /* If we go out of range report top of range */
-               if (val < 100 || val >= 0x3fb) {
+               if (val < arizona_hpdet_b_ranges[range].threshold ||
+                   val >= ARIZONA_HPDET_B_RANGE_MAX) {
                        dev_dbg(arizona->dev, "Measurement out of range\n");
                        return ARIZONA_HPDET_MAX;
                }
@@ -664,9 +674,8 @@ err:
                           ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
 
        /* Just report headphone */
-       ret = extcon_update_state(info->edev,
-                                 1 << ARIZONA_CABLE_HEADPHONE,
-                                 1 << ARIZONA_CABLE_HEADPHONE);
+       ret = extcon_set_cable_state_(info->edev,
+                                     ARIZONA_CABLE_HEADPHONE, true);
        if (ret != 0)
                dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
 
@@ -723,9 +732,8 @@ err:
                           ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
 
        /* Just report headphone */
-       ret = extcon_update_state(info->edev,
-                                 1 << ARIZONA_CABLE_HEADPHONE,
-                                 1 << ARIZONA_CABLE_HEADPHONE);
+       ret = extcon_set_cable_state_(info->edev,
+                                     ARIZONA_CABLE_HEADPHONE, true);
        if (ret != 0)
                dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
 
@@ -812,16 +820,15 @@ static void arizona_micd_detect(struct work_struct *work)
        if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
                arizona_identify_headphone(info);
 
-               ret = extcon_update_state(info->edev,
-                                         1 << ARIZONA_CABLE_MICROPHONE,
-                                         1 << ARIZONA_CABLE_MICROPHONE);
+               ret = extcon_set_cable_state_(info->edev,
+                                             ARIZONA_CABLE_MICROPHONE, true);
 
                if (ret != 0)
                        dev_err(arizona->dev, "Headset report failed: %d\n",
                                ret);
 
                /* Don't need to regulate for button detection */
-               ret = regulator_allow_bypass(info->micvdd, false);
+               ret = regulator_allow_bypass(info->micvdd, true);
                if (ret != 0) {
                        dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
                                ret);
@@ -962,10 +969,16 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
 
        if (arizona->pdata.jd_gpio5) {
                mask = ARIZONA_MICD_CLAMP_STS;
-               present = 0;
+               if (arizona->pdata.jd_invert)
+                       present = ARIZONA_MICD_CLAMP_STS;
+               else
+                       present = 0;
        } else {
                mask = ARIZONA_JD1_STS;
-               present = ARIZONA_JD1_STS;
+               if (arizona->pdata.jd_invert)
+                       present = 0;
+               else
+                       present = ARIZONA_JD1_STS;
        }
 
        ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
@@ -1096,6 +1109,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
        struct arizona_pdata *pdata = &arizona->pdata;
        struct arizona_extcon_info *info;
        unsigned int val;
+       unsigned int clamp_mode;
        int jack_irq_fall, jack_irq_rise;
        int ret, mode, i, j;
 
@@ -1103,12 +1117,10 @@ static int arizona_extcon_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "Failed to allocate memory\n");
+       if (!info)
                return -ENOMEM;
-       }
 
-       info->micvdd = devm_regulator_get(arizona->dev, "MICVDD");
+       info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
        if (IS_ERR(info->micvdd)) {
                ret = PTR_ERR(info->micvdd);
                dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret);
@@ -1156,7 +1168,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        info->edev->name = "Headset Jack";
-       info->edev->dev.parent = arizona->dev;
 
        ret = devm_extcon_dev_register(&pdev->dev, info->edev);
        if (ret < 0) {
@@ -1174,7 +1185,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 
        info->input->name = "Headset";
        info->input->phys = "arizona/extcon";
-       info->input->dev.parent = &pdev->dev;
 
        if (pdata->num_micd_configs) {
                info->micd_modes = pdata->micd_configs;
@@ -1305,15 +1315,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
                        regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
                                     val);
 
-                       regmap_update_bits(arizona->regmap,
-                                          ARIZONA_MICD_CLAMP_CONTROL,
-                                          ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
+                       if (arizona->pdata.jd_invert)
+                               clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDH_GP5H;
+                       else
+                               clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDL_GP5H;
                } else {
-                       regmap_update_bits(arizona->regmap,
-                                          ARIZONA_MICD_CLAMP_CONTROL,
-                                          ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
+                       if (arizona->pdata.jd_invert)
+                               clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDH;
+                       else
+                               clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDL;
                }
 
+               regmap_update_bits(arizona->regmap,
+                                  ARIZONA_MICD_CLAMP_CONTROL,
+                                  ARIZONA_MICD_CLAMP_MODE_MASK, clamp_mode);
+
                regmap_update_bits(arizona->regmap,
                                   ARIZONA_JACK_DETECT_DEBOUNCE,
                                   ARIZONA_MICD_CLAMP_DB,
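
The arizona hunks above replace open-coded state-mask updates with the per-cable helper. A minimal sketch of the equivalence, assuming a driver that only needs to assert one cable bit (the wrapper name below is invented; ARIZONA_CABLE_HEADPHONE is the index used in the hunks):

#include <linux/extcon.h>

static int report_headphone(struct extcon_dev *edev)
{
	/*
	 * Old style: update one bit inside the 32-bit extcon state word:
	 *   extcon_update_state(edev, 1 << ARIZONA_CABLE_HEADPHONE,
	 *                       1 << ARIZONA_CABLE_HEADPHONE);
	 * New style: address the cable by index and pass the state as a bool:
	 */
	return extcon_set_cable_state_(edev, ARIZONA_CABLE_HEADPHONE, true);
}
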
index 18d42c0..4c2f2c5 100644 (file)
@@ -645,6 +645,8 @@ struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
                return edev;
        }
 
+       edev->dev.parent = dev;
+
        *ptr = edev;
        devres_add(dev, ptr);
 
index 645b283..5b7ec27 100644 (file)
@@ -105,7 +105,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        extcon_data->edev->name = pdata->name;
-       extcon_data->edev->dev.parent = &pdev->dev;
 
        extcon_data->gpio = pdata->gpio;
        extcon_data->gpio_active_low = pdata->gpio_active_low;
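
The edev->dev.parent removals in this and the neighbouring extcon drivers are enabled by the extcon-class change above, which now sets the parent inside devm_extcon_dev_allocate(). A minimal probe then reduces to roughly the following sketch (the foo_* names are invented):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static const char *foo_cables[] = { "USB", NULL };	/* hypothetical cable list */

static int foo_extcon_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* edev->dev.parent is already &pdev->dev; no manual assignment needed */
	return devm_extcon_dev_register(&pdev->dev, edev);
}
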
index d49e891..7309743 100644 (file)
@@ -692,10 +692,9 @@ static int max14577_muic_probe(struct platform_device *pdev)
        u8 id;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
+       if (!info)
                return -ENOMEM;
-       }
+
        info->dev = &pdev->dev;
        info->max14577 = max14577;
 
index 2c7c3e1..77460f2 100644 (file)
@@ -255,10 +255,10 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
        case ADC_DEBOUNCE_TIME_10MS:
        case ADC_DEBOUNCE_TIME_25MS:
        case ADC_DEBOUNCE_TIME_38_62MS:
-               ret = max77693_update_reg(info->max77693->regmap_muic,
+               ret = regmap_update_bits(info->max77693->regmap_muic,
                                          MAX77693_MUIC_REG_CTRL3,
-                                         time << CONTROL3_ADCDBSET_SHIFT,
-                                         CONTROL3_ADCDBSET_MASK);
+                                         CONTROL3_ADCDBSET_MASK,
+                                         time << CONTROL3_ADCDBSET_SHIFT);
                if (ret) {
                        dev_err(info->dev, "failed to set ADC debounce time\n");
                        return ret;
@@ -286,15 +286,15 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
                u8 val, bool attached)
 {
        int ret = 0;
-       u8 ctrl1, ctrl2 = 0;
+       unsigned int ctrl1, ctrl2 = 0;
 
        if (attached)
                ctrl1 = val;
        else
                ctrl1 = CONTROL1_SW_OPEN;
 
-       ret = max77693_update_reg(info->max77693->regmap_muic,
-                       MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
+       ret = regmap_update_bits(info->max77693->regmap_muic,
+                       MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
        if (ret < 0) {
                dev_err(info->dev, "failed to update MUIC register\n");
                return ret;
@@ -305,9 +305,9 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
        else
                ctrl2 |= CONTROL2_LOWPWR_MASK;  /* LowPwr=1, CPEn=0 */
 
-       ret = max77693_update_reg(info->max77693->regmap_muic,
-                       MAX77693_MUIC_REG_CTRL2, ctrl2,
-                       CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
+       ret = regmap_update_bits(info->max77693->regmap_muic,
+                       MAX77693_MUIC_REG_CTRL2,
+                       CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2);
        if (ret < 0) {
                dev_err(info->dev, "failed to update MUIC register\n");
                return ret;
@@ -969,8 +969,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
                if (info->irq == muic_irqs[i].virq)
                        irq_type = muic_irqs[i].irq;
 
-       ret = max77693_bulk_read(info->max77693->regmap_muic,
-                       MAX77693_MUIC_REG_STATUS1, 2, info->status);
+       ret = regmap_bulk_read(info->max77693->regmap_muic,
+                       MAX77693_MUIC_REG_STATUS1, info->status, 2);
        if (ret) {
                dev_err(info->dev, "failed to read MUIC register\n");
                mutex_unlock(&info->mutex);
@@ -1042,8 +1042,8 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
        mutex_lock(&info->mutex);
 
        /* Read STATUSx register to detect accessory */
-       ret = max77693_bulk_read(info->max77693->regmap_muic,
-                       MAX77693_MUIC_REG_STATUS1, 2, info->status);
+       ret = regmap_bulk_read(info->max77693->regmap_muic,
+                       MAX77693_MUIC_REG_STATUS1, info->status, 2);
        if (ret) {
                dev_err(info->dev, "failed to read MUIC register\n");
                mutex_unlock(&info->mutex);
@@ -1095,14 +1095,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
        int delay_jiffies;
        int ret;
        int i;
-       u8 id;
+       unsigned int id;
 
        info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
                                   GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
+       if (!info)
                return -ENOMEM;
-       }
+
        info->dev = &pdev->dev;
        info->max77693 = max77693;
        if (info->max77693->regmap_muic) {
@@ -1154,7 +1153,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
                struct max77693_muic_irq *muic_irq = &muic_irqs[i];
                unsigned int virq = 0;
 
-               virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
+               virq = regmap_irq_get_virq(max77693->irq_data_muic,
+                                       muic_irq->irq);
                if (!virq) {
                        ret = -EINVAL;
                        goto err_irq;
@@ -1183,7 +1183,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
        info->edev->name = DEV_NAME;
-       info->edev->dev.parent = &pdev->dev;
 
        ret = devm_extcon_dev_register(&pdev->dev, info->edev);
        if (ret) {
@@ -1204,7 +1203,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
                enum max77693_irq_source irq_src
                                = MAX77693_IRQ_GROUP_NR;
 
-               max77693_write_reg(info->max77693->regmap_muic,
+               regmap_write(info->max77693->regmap_muic,
                                init_data[i].addr,
                                init_data[i].data);
 
@@ -1262,7 +1261,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
         max77693_muic_set_path(info, info->path_uart, true);
 
        /* Check revision number of MUIC device*/
-       ret = max77693_read_reg(info->max77693->regmap_muic,
+       ret = regmap_read(info->max77693->regmap_muic,
                        MAX77693_MUIC_REG_ID, &id);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to read revision number\n");
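
The max77693 hunks above swap the private MFD accessors for plain regmap calls; the subtle part is the argument order. The mapping below is inferred from the call sites in this diff (a sketch, not an exhaustive API reference; the register constants come from the max77693 headers):

#include <linux/regmap.h>

/*
 * max77693_update_reg(map, reg, val, mask) -> regmap_update_bits(map, reg, mask, val)
 * max77693_bulk_read(map, reg, cnt, buf)   -> regmap_bulk_read(map, reg, buf, cnt)
 * max77693_read_reg(map, reg, &val)        -> regmap_read(map, reg, &val)  (val: unsigned int)
 * max77693_write_reg(map, reg, val)        -> regmap_write(map, reg, val)
 */
static int set_adc_debounce(struct regmap *map, unsigned int time)
{
	/* the mask comes before the value in regmap_update_bits() */
	return regmap_update_bits(map, MAX77693_MUIC_REG_CTRL3,
				  CONTROL3_ADCDBSET_MASK,
				  time << CONTROL3_ADCDBSET_SHIFT);
}
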
index d9f7f1b..75e501c 100644 (file)
@@ -661,10 +661,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
 
        info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
                            GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
+       if (!info)
                return -ENOMEM;
-       }
 
        info->dev = &pdev->dev;
        info->muic = max8997->muic;
@@ -706,7 +704,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
        info->edev->name = DEV_NAME;
-       info->edev->dev.parent = &pdev->dev;
 
        ret = devm_extcon_dev_register(&pdev->dev, info->edev);
        if (ret) {
index 7417ce8..230e122 100644 (file)
@@ -194,7 +194,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL);
-       palmas_usb->edev->dev.parent = palmas_usb->dev;
        palmas_usb->edev->mutually_exclusive = mutually_exclusive;
 
        status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
@@ -278,7 +277,7 @@ static int palmas_usb_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
 
-static struct of_device_id of_palmas_match_tbl[] = {
+static const struct of_device_id of_palmas_match_tbl[] = {
        { .compatible = "ti,palmas-usb", },
        { .compatible = "ti,palmas-usb-vid", },
        { .compatible = "ti,twl6035-usb", },
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
new file mode 100644 (file)
index 0000000..560d7dc
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * extcon-sm5502.c - Silicon Mitus SM5502 extcon driver to support USB switches
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/extcon.h>
+#include <linux/extcon/sm5502.h>
+
+#define        DELAY_MS_DEFAULT                17000   /* unit: millisecond */
+
+struct muic_irq {
+       unsigned int irq;
+       const char *name;
+       unsigned int virq;
+};
+
+struct reg_data {
+       u8 reg;
+       unsigned int val;
+       bool invert;
+};
+
+struct sm5502_muic_info {
+       struct device *dev;
+       struct extcon_dev *edev;
+
+       struct i2c_client *i2c;
+       struct regmap *regmap;
+
+       struct regmap_irq_chip_data *irq_data;
+       struct muic_irq *muic_irqs;
+       unsigned int num_muic_irqs;
+       int irq;
+       bool irq_attach;
+       bool irq_detach;
+       struct work_struct irq_work;
+
+       struct reg_data *reg_data;
+       unsigned int num_reg_data;
+
+       struct mutex mutex;
+
+       /*
+        * Use a delayed workqueue to detect the cable state and then
+        * notify it to the notifiee/platform through a uevent.
+        * Once the platform has finished booting, the extcon provider
+        * driver should report the cable state to the upper layer.
+        */
+       struct delayed_work wq_detcable;
+};
+
+/* Default register values used to bring up the SM5502 MUIC device. */
+static struct reg_data sm5502_reg_data[] = {
+       {
+               .reg = SM5502_REG_CONTROL,
+               .val = SM5502_REG_CONTROL_MASK_INT_MASK,
+               .invert = false,
+       }, {
+               .reg = SM5502_REG_INTMASK1,
+               .val = SM5502_REG_INTM1_KP_MASK
+                       | SM5502_REG_INTM1_LKP_MASK
+                       | SM5502_REG_INTM1_LKR_MASK,
+               .invert = true,
+       }, {
+               .reg = SM5502_REG_INTMASK2,
+               .val = SM5502_REG_INTM2_VBUS_DET_MASK
+                       | SM5502_REG_INTM2_REV_ACCE_MASK
+                       | SM5502_REG_INTM2_ADC_CHG_MASK
+                       | SM5502_REG_INTM2_STUCK_KEY_MASK
+                       | SM5502_REG_INTM2_STUCK_KEY_RCV_MASK
+                       | SM5502_REG_INTM2_MHL_MASK,
+               .invert = true,
+       },
+       { }
+};
+
+/* List of detectable cables */
+enum {
+       EXTCON_CABLE_USB = 0,
+       EXTCON_CABLE_USB_HOST,
+       EXTCON_CABLE_TA,
+
+       EXTCON_CABLE_END,
+};
+
+static const char *sm5502_extcon_cable[] = {
+       [EXTCON_CABLE_USB]      = "USB",
+       [EXTCON_CABLE_USB_HOST] = "USB-Host",
+       [EXTCON_CABLE_TA]       = "TA",
+       NULL,
+};
+
+/* Define supported accessory type */
+enum sm5502_muic_acc_type {
+       SM5502_MUIC_ADC_GROUND = 0x0,
+       SM5502_MUIC_ADC_SEND_END_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S1_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S2_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S3_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S4_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S5_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S6_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S7_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S8_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S9_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S10_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S11_BUTTON,
+       SM5502_MUIC_ADC_REMOTE_S12_BUTTON,
+       SM5502_MUIC_ADC_RESERVED_ACC_1,
+       SM5502_MUIC_ADC_RESERVED_ACC_2,
+       SM5502_MUIC_ADC_RESERVED_ACC_3,
+       SM5502_MUIC_ADC_RESERVED_ACC_4,
+       SM5502_MUIC_ADC_RESERVED_ACC_5,
+       SM5502_MUIC_ADC_AUDIO_TYPE2,
+       SM5502_MUIC_ADC_PHONE_POWERED_DEV,
+       SM5502_MUIC_ADC_TTY_CONVERTER,
+       SM5502_MUIC_ADC_UART_CABLE,
+       SM5502_MUIC_ADC_TYPE1_CHARGER,
+       SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB,
+       SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB,
+       SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE,
+       SM5502_MUIC_ADC_TYPE2_CHARGER,
+       SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART,
+       SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART,
+       SM5502_MUIC_ADC_AUDIO_TYPE1,
+       SM5502_MUIC_ADC_OPEN = 0x1f,
+
+       /* The accessories below share the same ADC value (0x1f or 0x1e),
+          so the Device Type1 register is used to tell them apart. */
+                                                       /* |---------|--ADC| */
+                                                       /* |    [7:5]|[4:0]| */
+       SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* |      001|11110| */
+       SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END = 0x5e,    /* |      010|11110| */
+                                                       /* |Dev Type1|--ADC| */
+       SM5502_MUIC_ADC_OPEN_USB = 0x5f,                /* |      010|11111| */
+       SM5502_MUIC_ADC_OPEN_TA = 0xdf,                 /* |      110|11111| */
+       SM5502_MUIC_ADC_OPEN_USB_OTG = 0xff,            /* |      111|11111| */
+};
+
+/* List of supported interrupt for SM5502 */
+static struct muic_irq sm5502_muic_irqs[] = {
+       { SM5502_IRQ_INT1_ATTACH,       "muic-attach" },
+       { SM5502_IRQ_INT1_DETACH,       "muic-detach" },
+       { SM5502_IRQ_INT1_KP,           "muic-kp" },
+       { SM5502_IRQ_INT1_LKP,          "muic-lkp" },
+       { SM5502_IRQ_INT1_LKR,          "muic-lkr" },
+       { SM5502_IRQ_INT1_OVP_EVENT,    "muic-ovp-event" },
+       { SM5502_IRQ_INT1_OCP_EVENT,    "muic-ocp-event" },
+       { SM5502_IRQ_INT1_OVP_OCP_DIS,  "muic-ovp-ocp-dis" },
+       { SM5502_IRQ_INT2_VBUS_DET,     "muic-vbus-det" },
+       { SM5502_IRQ_INT2_REV_ACCE,     "muic-rev-acce" },
+       { SM5502_IRQ_INT2_ADC_CHG,      "muic-adc-chg" },
+       { SM5502_IRQ_INT2_STUCK_KEY,    "muic-stuck-key" },
+       { SM5502_IRQ_INT2_STUCK_KEY_RCV, "muic-stuck-key-rcv" },
+       { SM5502_IRQ_INT2_MHL,          "muic-mhl" },
+};
+
+/* Define interrupt list of SM5502 to register regmap_irq */
+static const struct regmap_irq sm5502_irqs[] = {
+       /* INT1 interrupts */
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_ATTACH_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_DETACH_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_KP_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKP_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKR_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_EVENT_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OCP_EVENT_MASK, },
+       { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_OCP_DIS_MASK, },
+
+       /* INT2 interrupts */
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_VBUS_DET_MASK,},
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_REV_ACCE_MASK, },
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_ADC_CHG_MASK, },
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_MASK, },
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK, },
+       { .reg_offset = 1, .mask = SM5502_IRQ_INT2_MHL_MASK, },
+};
+
+static const struct regmap_irq_chip sm5502_muic_irq_chip = {
+       .name                   = "sm5502",
+       .status_base            = SM5502_REG_INT1,
+       .mask_base              = SM5502_REG_INTMASK1,
+       .mask_invert            = false,
+       .num_regs               = 2,
+       .irqs                   = sm5502_irqs,
+       .num_irqs               = ARRAY_SIZE(sm5502_irqs),
+};
+
+/* Define regmap configuration of SM5502 for I2C communication  */
+static bool sm5502_muic_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case SM5502_REG_INTMASK1:
+       case SM5502_REG_INTMASK2:
+               return true;
+       default:
+               break;
+       }
+       return false;
+}
+
+static const struct regmap_config sm5502_muic_regmap_config = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .volatile_reg   = sm5502_muic_volatile_reg,
+       .max_register   = SM5502_REG_END,
+};
+
+/* Change DM_CON/DP_CON/VBUSIN switch according to cable type */
+static int sm5502_muic_set_path(struct sm5502_muic_info *info,
+                               unsigned int con_sw, unsigned int vbus_sw,
+                               bool attached)
+{
+       int ret;
+
+       if (!attached) {
+               con_sw  = DM_DP_SWITCH_OPEN;
+               vbus_sw = VBUSIN_SWITCH_OPEN;
+       }
+
+       switch (con_sw) {
+       case DM_DP_SWITCH_OPEN:
+       case DM_DP_SWITCH_USB:
+       case DM_DP_SWITCH_AUDIO:
+       case DM_DP_SWITCH_UART:
+               ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
+                                        SM5502_REG_MANUAL_SW1_DP_MASK |
+                                        SM5502_REG_MANUAL_SW1_DM_MASK,
+                                        con_sw);
+               if (ret < 0) {
+                       dev_err(info->dev,
+                               "cannot update DM_CON/DP_CON switch\n");
+                       return ret;
+               }
+               break;
+       default:
+               dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
+                               con_sw);
+               return -EINVAL;
+       }
+
+       switch (vbus_sw) {
+       case VBUSIN_SWITCH_OPEN:
+       case VBUSIN_SWITCH_VBUSOUT:
+       case VBUSIN_SWITCH_MIC:
+       case VBUSIN_SWITCH_VBUSOUT_WITH_USB:
+               ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
+                                        SM5502_REG_MANUAL_SW1_VBUSIN_MASK,
+                                        vbus_sw);
+               if (ret < 0) {
+                       dev_err(info->dev,
+                               "cannot update VBUSIN switch\n");
+                       return ret;
+               }
+               break;
+       default:
+               dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Return cable type of attached or detached accessories */
+static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
+{
+       unsigned int cable_type = -1, adc, dev_type1;
+       int ret;
+
+       /* Read ADC value according to external cable or button */
+       ret = regmap_read(info->regmap, SM5502_REG_ADC, &adc);
+       if (ret) {
+               dev_err(info->dev, "failed to read ADC register\n");
+               return ret;
+       }
+
+       /*
+        * If ADC is SM5502_MUIC_ADC_GROUND(0x0), no external cable is
+        * connected to the MUIC device.
+        */
+       cable_type = adc & SM5502_REG_ADC_MASK;
+       if (cable_type == SM5502_MUIC_ADC_GROUND)
+               return SM5502_MUIC_ADC_GROUND;
+
+       switch (cable_type) {
+       case SM5502_MUIC_ADC_GROUND:
+       case SM5502_MUIC_ADC_SEND_END_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S1_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S2_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S3_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S4_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S5_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S6_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S7_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S8_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S9_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S10_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S11_BUTTON:
+       case SM5502_MUIC_ADC_REMOTE_S12_BUTTON:
+       case SM5502_MUIC_ADC_RESERVED_ACC_1:
+       case SM5502_MUIC_ADC_RESERVED_ACC_2:
+       case SM5502_MUIC_ADC_RESERVED_ACC_3:
+       case SM5502_MUIC_ADC_RESERVED_ACC_4:
+       case SM5502_MUIC_ADC_RESERVED_ACC_5:
+       case SM5502_MUIC_ADC_AUDIO_TYPE2:
+       case SM5502_MUIC_ADC_PHONE_POWERED_DEV:
+       case SM5502_MUIC_ADC_TTY_CONVERTER:
+       case SM5502_MUIC_ADC_UART_CABLE:
+       case SM5502_MUIC_ADC_TYPE1_CHARGER:
+       case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
+       case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
+       case SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE:
+       case SM5502_MUIC_ADC_TYPE2_CHARGER:
+       case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
+       case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
+               break;
+       case SM5502_MUIC_ADC_AUDIO_TYPE1:
+               /*
+                * Check whether cable type is
+                * SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE
+                * or SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END
+                * by using Button event.
+                */
+               break;
+       case SM5502_MUIC_ADC_OPEN:
+               ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
+                                 &dev_type1);
+               if (ret) {
+                       dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
+                       return ret;
+               }
+
+               switch (dev_type1) {
+               case SM5502_REG_DEV_TYPE1_USB_SDP_MASK:
+                       cable_type = SM5502_MUIC_ADC_OPEN_USB;
+                       break;
+               case SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK:
+                       cable_type = SM5502_MUIC_ADC_OPEN_TA;
+                       break;
+               case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
+                       cable_type = SM5502_MUIC_ADC_OPEN_USB_OTG;
+                       break;
+               default:
+                       dev_dbg(info->dev,
+                               "cannot identify the cable type: adc(0x%x) "
+                               "dev_type1(0x%x)\n", adc, dev_type1);
+                       return -EINVAL;
+               }
+               break;
+       default:
+               dev_err(info->dev,
+                       "failed to identify the cable type: adc(0x%x)\n", adc);
+               return -EINVAL;
+       }
+
+       return cable_type;
+}
+
+static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
+                                    bool attached)
+{
+       static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND;
+       const char **cable_names = info->edev->supported_cable;
+       unsigned int cable_type = SM5502_MUIC_ADC_GROUND;
+       unsigned int con_sw = DM_DP_SWITCH_OPEN;
+       unsigned int vbus_sw = VBUSIN_SWITCH_OPEN;
+       unsigned int idx = 0;
+       int ret;
+
+       if (!cable_names)
+               return 0;
+
+       /* Get the type of attached or detached cable */
+       if (attached)
+               cable_type = sm5502_muic_get_cable_type(info);
+       else if (!attached)
+               cable_type = prev_cable_type;
+       prev_cable_type = cable_type;
+
+       switch (cable_type) {
+       case SM5502_MUIC_ADC_OPEN_USB:
+               idx     = EXTCON_CABLE_USB;
+               con_sw  = DM_DP_SWITCH_USB;
+               vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
+               break;
+       case SM5502_MUIC_ADC_OPEN_TA:
+               idx     = EXTCON_CABLE_TA;
+               con_sw  = DM_DP_SWITCH_OPEN;
+               vbus_sw = VBUSIN_SWITCH_VBUSOUT;
+               break;
+       case SM5502_MUIC_ADC_OPEN_USB_OTG:
+               idx     = EXTCON_CABLE_USB_HOST;
+               con_sw  = DM_DP_SWITCH_USB;
+               vbus_sw = VBUSIN_SWITCH_OPEN;
+               break;
+       default:
+               dev_dbg(info->dev,
+                       "cannot handle this cable_type (0x%x)\n", cable_type);
+               return 0;
+       }
+
+       /* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
+       ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
+       if (ret < 0)
+               return ret;
+
+       /* Change the state of external accessory */
+       extcon_set_cable_state(info->edev, cable_names[idx], attached);
+
+       return 0;
+}
+
+static void sm5502_muic_irq_work(struct work_struct *work)
+{
+       struct sm5502_muic_info *info = container_of(work,
+                       struct sm5502_muic_info, irq_work);
+       int ret = 0;
+
+       if (!info->edev)
+               return;
+
+       mutex_lock(&info->mutex);
+
+       /* Detect attached or detached cables */
+       if (info->irq_attach) {
+               ret = sm5502_muic_cable_handler(info, true);
+               info->irq_attach = false;
+       }
+       if (info->irq_detach) {
+               ret = sm5502_muic_cable_handler(info, false);
+               info->irq_detach = false;
+       }
+
+       if (ret < 0)
+               dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
+       mutex_unlock(&info->mutex);
+
+       return;
+}
+
+/*
+ * Sets irq_attach or irq_detach in sm5502_muic_info and returns 0.
+ * Returns -ESRCH if irq_type does not match registered IRQ for this dev type.
+ */
+static int sm5502_parse_irq(struct sm5502_muic_info *info, int irq_type)
+{
+       switch (irq_type) {
+       case SM5502_IRQ_INT1_ATTACH:
+               info->irq_attach = true;
+               break;
+       case SM5502_IRQ_INT1_DETACH:
+               info->irq_detach = true;
+               break;
+       case SM5502_IRQ_INT1_KP:
+       case SM5502_IRQ_INT1_LKP:
+       case SM5502_IRQ_INT1_LKR:
+       case SM5502_IRQ_INT1_OVP_EVENT:
+       case SM5502_IRQ_INT1_OCP_EVENT:
+       case SM5502_IRQ_INT1_OVP_OCP_DIS:
+       case SM5502_IRQ_INT2_VBUS_DET:
+       case SM5502_IRQ_INT2_REV_ACCE:
+       case SM5502_IRQ_INT2_ADC_CHG:
+       case SM5502_IRQ_INT2_STUCK_KEY:
+       case SM5502_IRQ_INT2_STUCK_KEY_RCV:
+       case SM5502_IRQ_INT2_MHL:
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static irqreturn_t sm5502_muic_irq_handler(int irq, void *data)
+{
+       struct sm5502_muic_info *info = data;
+       int i, irq_type = -1, ret;
+
+       for (i = 0; i < info->num_muic_irqs; i++)
+               if (irq == info->muic_irqs[i].virq)
+                       irq_type = info->muic_irqs[i].irq;
+
+       ret = sm5502_parse_irq(info, irq_type);
+       if (ret < 0) {
+               dev_warn(info->dev, "cannot handle this interrupt:%d\n",
+                                   irq_type);
+               return IRQ_HANDLED;
+       }
+       schedule_work(&info->irq_work);
+
+       return IRQ_HANDLED;
+}
+
+static void sm5502_muic_detect_cable_wq(struct work_struct *work)
+{
+       struct sm5502_muic_info *info = container_of(to_delayed_work(work),
+                               struct sm5502_muic_info, wq_detcable);
+       int ret;
+
+       /* Detect the connected cable, if any, and notify its state */
+       ret = sm5502_muic_cable_handler(info, true);
+       if (ret < 0)
+               dev_warn(info->dev, "failed to detect cable state\n");
+}
+
+static void sm5502_init_dev_type(struct sm5502_muic_info *info)
+{
+       unsigned int reg_data, vendor_id, version_id;
+       int i, ret;
+
+       /* To verify I2C communication, print the vendor and version IDs of the SM5502 */
+       ret = regmap_read(info->regmap, SM5502_REG_DEVICE_ID, &reg_data);
+       if (ret) {
+               dev_err(info->dev,
+                       "failed to read DEVICE_ID register: %d\n", ret);
+               return;
+       }
+
+       vendor_id = ((reg_data & SM5502_REG_DEVICE_ID_VENDOR_MASK) >>
+                               SM5502_REG_DEVICE_ID_VENDOR_SHIFT);
+       version_id = ((reg_data & SM5502_REG_DEVICE_ID_VERSION_MASK) >>
+                               SM5502_REG_DEVICE_ID_VERSION_SHIFT);
+
+       dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
+                           version_id, vendor_id);
+
+       /* Initialize the SM5502 registers to bring up the device */
+       for (i = 0; i < info->num_reg_data; i++) {
+               unsigned int val = 0;
+
+               if (!info->reg_data[i].invert)
+                       val |= ~info->reg_data[i].val;
+               else
+                       val = info->reg_data[i].val;
+               regmap_write(info->regmap, info->reg_data[i].reg, val);
+       }
+}
+
+static int sm5502_muic_i2c_probe(struct i2c_client *i2c,
+                                const struct i2c_device_id *id)
+{
+       struct device_node *np = i2c->dev.of_node;
+       struct sm5502_muic_info *info;
+       int i, ret, irq_flags;
+
+       if (!np)
+               return -EINVAL;
+
+       info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+       i2c_set_clientdata(i2c, info);
+
+       info->dev = &i2c->dev;
+       info->i2c = i2c;
+       info->irq = i2c->irq;
+       info->muic_irqs = sm5502_muic_irqs;
+       info->num_muic_irqs = ARRAY_SIZE(sm5502_muic_irqs);
+       info->reg_data = sm5502_reg_data;
+       info->num_reg_data = ARRAY_SIZE(sm5502_reg_data);
+
+       mutex_init(&info->mutex);
+
+       INIT_WORK(&info->irq_work, sm5502_muic_irq_work);
+
+       info->regmap = devm_regmap_init_i2c(i2c, &sm5502_muic_regmap_config);
+       if (IS_ERR(info->regmap)) {
+               ret = PTR_ERR(info->regmap);
+               dev_err(info->dev, "failed to allocate register map: %d\n",
+                                  ret);
+               return ret;
+       }
+
+       /* Support irq domain for SM5502 MUIC device */
+       irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
+       ret = regmap_add_irq_chip(info->regmap, info->irq, irq_flags, 0,
+                                 &sm5502_muic_irq_chip, &info->irq_data);
+       if (ret != 0) {
+               dev_err(info->dev, "failed to request IRQ %d: %d\n",
+                                   info->irq, ret);
+               return ret;
+       }
+
+       for (i = 0; i < info->num_muic_irqs; i++) {
+               struct muic_irq *muic_irq = &info->muic_irqs[i];
+               unsigned int virq = 0;
+
+               virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
+               if (virq <= 0)
+                       return -EINVAL;
+               muic_irq->virq = virq;
+
+               ret = devm_request_threaded_irq(info->dev, virq, NULL,
+                                               sm5502_muic_irq_handler,
+                                               IRQF_NO_SUSPEND,
+                                               muic_irq->name, info);
+               if (ret) {
+                       dev_err(info->dev, "failed: irq request (IRQ: %d,"
+                               " error :%d)\n", muic_irq->irq, ret);
+                       return ret;
+               }
+       }
+
+       /* Allocate extcon device */
+       info->edev = devm_extcon_dev_allocate(info->dev, sm5502_extcon_cable);
+       if (IS_ERR(info->edev)) {
+               dev_err(info->dev, "failed to allocate memory for extcon\n");
+               return -ENOMEM;
+       }
+       info->edev->name = np->name;
+
+       /* Register extcon device */
+       ret = devm_extcon_dev_register(info->dev, info->edev);
+       if (ret) {
+               dev_err(info->dev, "failed to register extcon device\n");
+               return ret;
+       }
+
+       /*
+        * Detect the accessory after the platform has completed initialization.
+        *
+        * - Use a delayed workqueue to detect the cable state and then
+        * notify it to the notifiee/platform through a uevent.
+        * Once the platform has finished booting, the extcon provider
+        * driver should report the cable state to the upper layer.
+        */
+       INIT_DELAYED_WORK(&info->wq_detcable, sm5502_muic_detect_cable_wq);
+       queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+                       msecs_to_jiffies(DELAY_MS_DEFAULT));
+
+       /* Initialize SM5502 device and print vendor id and version id */
+       sm5502_init_dev_type(info);
+
+       return 0;
+}
+
+static int sm5502_muic_i2c_remove(struct i2c_client *i2c)
+{
+       struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
+
+       regmap_del_irq_chip(info->irq, info->irq_data);
+
+       return 0;
+}
+
+static struct of_device_id sm5502_dt_match[] = {
+       { .compatible = "siliconmitus,sm5502-muic" },
+       { },
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int sm5502_muic_suspend(struct device *dev)
+{
+       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
+
+       enable_irq_wake(info->irq);
+
+       return 0;
+}
+
+static int sm5502_muic_resume(struct device *dev)
+{
+       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
+
+       disable_irq_wake(info->irq);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
+                        sm5502_muic_suspend, sm5502_muic_resume);
+
+static const struct i2c_device_id sm5502_i2c_id[] = {
+       { "sm5502", TYPE_SM5502 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
+
+static struct i2c_driver sm5502_muic_i2c_driver = {
+       .driver         = {
+               .name   = "sm5502",
+               .owner  = THIS_MODULE,
+               .pm     = &sm5502_muic_pm_ops,
+               .of_match_table = sm5502_dt_match,
+       },
+       .probe  = sm5502_muic_i2c_probe,
+       .remove = sm5502_muic_i2c_remove,
+       .id_table = sm5502_i2c_id,
+};
+
+static int __init sm5502_muic_i2c_init(void)
+{
+       return i2c_add_driver(&sm5502_muic_i2c_driver);
+}
+subsys_initcall(sm5502_muic_i2c_init);
+
+MODULE_DESCRIPTION("Silicon Mitus SM5502 Extcon driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
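
One detail of the new sm5502 driver that is easy to misread is the init loop in sm5502_init_dev_type(): entries whose .invert flag is set are written as-is, while the other entries are bit-inverted before the write. A standalone sketch of that arithmetic follows; the 8-bit register and mask values are invented, the real ones live in <linux/extcon/sm5502.h>, which is not part of this hunk:

#include <stdint.h>
#include <stdio.h>

struct reg_data {
	uint8_t reg;
	uint8_t val;
	int invert;
};

int main(void)
{
	/* hypothetical bit assignments, for illustration only */
	const struct reg_data rd[] = {
		{ .reg = 0x02, .val = 0x01, .invert = 0 },  /* e.g. CONTROL: MASK_INT bit */
		{ .reg = 0x05, .val = 0x38, .invert = 1 },  /* e.g. INTMASK1: KP|LKP|LKR  */
	};
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* mirrors the "if (!invert) val |= ~val; else val = val;" logic above */
		unsigned int val = rd[i].invert ? rd[i].val : (uint8_t)~rd[i].val;

		printf("write 0x%02x to reg 0x%02x\n", val, rd[i].reg);
	}
	return 0;
}
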
index 284cf66..531a593 100644 (file)
@@ -808,12 +808,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 
        *buffer_actual_len = packetlen;
 
-       if (packetlen > bufferlen) {
-               pr_err("Buffer too small - needed %d bytes but "
-                       "got space for only %d bytes\n",
-                       packetlen, bufferlen);
+       if (packetlen > bufferlen)
                return -ENOBUFS;
-       }
 
        *requestid = desc.trans_id;
 
index c276fde..de5e321 100644 (file)
@@ -618,7 +618,7 @@ static void tpci200_pci_remove(struct pci_dev *dev)
        __tpci200_pci_remove(tpci200);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(tpci200_idtable) = {
+static const struct pci_device_id tpci200_idtable[] = {
        { TPCI200_VENDOR_ID, TPCI200_DEVICE_ID, TPCI200_SUBVENDOR_ID,
          TPCI200_SUBDEVICE_ID },
        { 0, },
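
The DEFINE_PCI_DEVICE_TABLE removals here and elsewhere in this series are purely mechanical; by this point the macro was essentially just shorthand for declaring a const table (roughly as sketched below, from memory), so open-coding it changes nothing functionally:

/* approximate definition of the deprecated macro, for reference */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[]

/* so these two declarations are equivalent:                                */
/*   static DEFINE_PCI_DEVICE_TABLE(tpci200_idtable) = { ... };            */
/*   static const struct pci_device_id tpci200_idtable[] = { ... };        */
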
index 141094e..e41bef0 100644 (file)
@@ -177,19 +177,20 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
        if (channel->nb_bytes == 0)
                return;
 
+       spin_lock(&channel->lock);
        value = channel->tty_port.xmit_buf[*pointer_write];
        iowrite8(value, &channel->regs->w.thr);
        channel->stats.tx++;
        (*pointer_write)++;
        *pointer_write = *pointer_write % PAGE_SIZE;
        channel->nb_bytes--;
+       spin_unlock(&channel->lock);
 }
 
 static void ipoctal_irq_channel(struct ipoctal_channel *channel)
 {
        u8 isr, sr;
 
-       spin_lock(&channel->lock);
        /* The HW is organized in pair of channels.  See which register we need
         * to read from */
        isr = ioread8(&channel->block_regs->r.isr);
@@ -213,8 +214,6 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
        /* TX of each character */
        if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
                ipoctal_irq_tx(channel);
-
-       spin_unlock(&channel->lock);
 }
 
 static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -324,13 +323,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                         &block_regs[i].w.imr);
        }
 
-       /*
-        * IP-OCTAL has different addresses to copy its IRQ vector.
-        * Depending of the carrier these addresses are accesible or not.
-        * More info in the datasheet.
-        */
-       ipoctal->dev->bus->ops->request_irq(ipoctal->dev,
-                                      ipoctal_irq_handler, ipoctal);
        /* Dummy write */
        iowrite8(1, ipoctal->mem8_space + 1);
 
@@ -391,6 +383,14 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                dev_set_drvdata(tty_dev, channel);
        }
 
+       /*
+        * IP-OCTAL has different addresses to copy its IRQ vector.
+        * Depending on the carrier these addresses are accessible or not.
+        * More info in the datasheet.
+        */
+       ipoctal->dev->bus->ops->request_irq(ipoctal->dev,
+                                      ipoctal_irq_handler, ipoctal);
+
        return 0;
 }
 
index 6cc4b6a..fb824f5 100644 (file)
@@ -384,6 +384,7 @@ config MFD_MAX77693
        depends on I2C=y
        select MFD_CORE
        select REGMAP_I2C
+       select REGMAP_IRQ
        help
          Say yes here to add support for Maxim Semiconductor MAX77693.
          This is a companion Power Management IC with Flash, Haptic, Charger,
index 8afedba..8c6e7bb 100644 (file)
@@ -116,7 +116,7 @@ obj-$(CONFIG_MFD_DA9063)    += da9063.o
 
 obj-$(CONFIG_MFD_MAX14577)     += max14577.o
 obj-$(CONFIG_MFD_MAX77686)     += max77686.o max77686-irq.o
-obj-$(CONFIG_MFD_MAX77693)     += max77693.o max77693-irq.o
+obj-$(CONFIG_MFD_MAX77693)     += max77693.o
 obj-$(CONFIG_MFD_MAX8907)      += max8907.o
 max8925-objs                   := max8925-core.o max8925-i2c.o
 obj-$(CONFIG_MFD_MAX8925)      += max8925.o
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
deleted file mode 100644 (file)
index 66b58fe..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * max77693-irq.c - Interrupt controller support for MAX77693
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * SangYoung Son <hello.son@samsung.com>
- *
- * This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * This driver is based on max8997-irq.c
- */
-
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/irqdomain.h>
-#include <linux/mfd/max77693.h>
-#include <linux/mfd/max77693-private.h>
-
-static const u8 max77693_mask_reg[] = {
-       [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
-       [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
-       [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
-       [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
-       [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
-       [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
-};
-
-static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
-                               enum max77693_irq_source src)
-{
-       switch (src) {
-       case LED_INT ... CHG_INT:
-               return max77693->regmap;
-       case MUIC_INT1 ... MUIC_INT3:
-               return max77693->regmap_muic;
-       default:
-               return ERR_PTR(-EINVAL);
-       }
-}
-
-struct max77693_irq_data {
-       int mask;
-       enum max77693_irq_source group;
-};
-
-#define DECLARE_IRQ(idx, _group, _mask)                \
-       [(idx)] = { .group = (_group), .mask = (_mask) }
-static const struct max77693_irq_data max77693_irqs[] = {
-       DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN,        LED_INT, 1 << 0),
-       DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT,       LED_INT, 1 << 1),
-       DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN,        LED_INT, 1 << 2),
-       DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT,       LED_INT, 1 << 3),
-       DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH,         LED_INT, 1 << 4),
-
-       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT,      TOPSYS_INT, 1 << 0),
-       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT,      TOPSYS_INT, 1 << 1),
-       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT,     TOPSYS_INT, 1 << 3),
-
-       DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I,             CHG_INT, 1 << 0),
-       DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I,             CHG_INT, 1 << 2),
-       DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I,             CHG_INT, 1 << 3),
-       DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I,             CHG_INT, 1 << 4),
-       DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I,           CHG_INT, 1 << 6),
-
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC,         MUIC_INT1, 1 << 0),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW,     MUIC_INT1, 1 << 1),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR,     MUIC_INT1, 1 << 2),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K,       MUIC_INT1, 1 << 3),
-
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP,      MUIC_INT2, 1 << 0),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN,  MUIC_INT2, 1 << 1),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR,      MUIC_INT2, 1 << 2),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP,       MUIC_INT2, 1 << 3),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT,      MUIC_INT2, 1 << 4),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM,       MUIC_INT2, 1 << 5),
-
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC,         MUIC_INT3, 1 << 0),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC,       MUIC_INT3, 1 << 1),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP,         MUIC_INT3, 1 << 2),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,  MUIC_INT3, 1 << 3),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
-       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET,     MUIC_INT3, 1 << 5),
-};
-
-static void max77693_irq_lock(struct irq_data *data)
-{
-       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
-
-       mutex_lock(&max77693->irqlock);
-}
-
-static void max77693_irq_sync_unlock(struct irq_data *data)
-{
-       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
-       int i;
-
-       for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
-               u8 mask_reg = max77693_mask_reg[i];
-               struct regmap *map = max77693_get_regmap(max77693, i);
-
-               if (mask_reg == MAX77693_REG_INVALID ||
-                               IS_ERR_OR_NULL(map))
-                       continue;
-               max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
-
-               max77693_write_reg(map, max77693_mask_reg[i],
-                               max77693->irq_masks_cur[i]);
-       }
-
-       mutex_unlock(&max77693->irqlock);
-}
-
-static const inline struct max77693_irq_data *
-irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
-{
-       struct irq_data *data = irq_get_irq_data(irq);
-       return &max77693_irqs[data->hwirq];
-}
-
-static void max77693_irq_mask(struct irq_data *data)
-{
-       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
-       const struct max77693_irq_data *irq_data =
-                               irq_to_max77693_irq(max77693, data->irq);
-
-       if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
-               return;
-
-       if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
-               max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
-       else
-               max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
-}
-
-static void max77693_irq_unmask(struct irq_data *data)
-{
-       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
-       const struct max77693_irq_data *irq_data =
-           irq_to_max77693_irq(max77693, data->irq);
-
-       if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
-               return;
-
-       if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
-               max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
-       else
-               max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
-}
-
-static struct irq_chip max77693_irq_chip = {
-       .name                   = "max77693",
-       .irq_bus_lock           = max77693_irq_lock,
-       .irq_bus_sync_unlock    = max77693_irq_sync_unlock,
-       .irq_mask               = max77693_irq_mask,
-       .irq_unmask             = max77693_irq_unmask,
-};
-
-#define MAX77693_IRQSRC_CHG            (1 << 0)
-#define MAX77693_IRQSRC_TOP            (1 << 1)
-#define MAX77693_IRQSRC_FLASH          (1 << 2)
-#define MAX77693_IRQSRC_MUIC           (1 << 3)
-static irqreturn_t max77693_irq_thread(int irq, void *data)
-{
-       struct max77693_dev *max77693 = data;
-       u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
-       u8 irq_src;
-       int ret;
-       int i, cur_irq;
-
-       ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
-                               &irq_src);
-       if (ret < 0) {
-               dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
-                               ret);
-               return IRQ_NONE;
-       }
-
-       if (irq_src & MAX77693_IRQSRC_CHG)
-               /* CHG_INT */
-               ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
-                               &irq_reg[CHG_INT]);
-
-       if (irq_src & MAX77693_IRQSRC_TOP)
-               /* TOPSYS_INT */
-               ret = max77693_read_reg(max77693->regmap,
-                       MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
-
-       if (irq_src & MAX77693_IRQSRC_FLASH)
-               /* LED_INT */
-               ret = max77693_read_reg(max77693->regmap,
-                       MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
-
-       if (irq_src & MAX77693_IRQSRC_MUIC)
-               /* MUIC INT1 ~ INT3 */
-               max77693_bulk_read(max77693->regmap_muic, MAX77693_MUIC_REG_INT1,
-                       MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
-
-       /* Apply masking */
-       for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
-               if (i >= MUIC_INT1 && i <= MUIC_INT3)
-                       irq_reg[i] &= max77693->irq_masks_cur[i];
-               else
-                       irq_reg[i] &= ~max77693->irq_masks_cur[i];
-       }
-
-       /* Report */
-       for (i = 0; i < MAX77693_IRQ_NR; i++) {
-               if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
-                       cur_irq = irq_find_mapping(max77693->irq_domain, i);
-                       if (cur_irq)
-                               handle_nested_irq(cur_irq);
-               }
-       }
-
-       return IRQ_HANDLED;
-}
-
-int max77693_irq_resume(struct max77693_dev *max77693)
-{
-       if (max77693->irq)
-               max77693_irq_thread(0, max77693);
-
-       return 0;
-}
-
-static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
-                               irq_hw_number_t hw)
-{
-       struct max77693_dev *max77693 = d->host_data;
-
-       irq_set_chip_data(irq, max77693);
-       irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
-       irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-       set_irq_flags(irq, IRQF_VALID);
-#else
-       irq_set_noprobe(irq);
-#endif
-       return 0;
-}
-
-static struct irq_domain_ops max77693_irq_domain_ops = {
-       .map = max77693_irq_domain_map,
-};
-
-int max77693_irq_init(struct max77693_dev *max77693)
-{
-       struct irq_domain *domain;
-       int i;
-       int ret = 0;
-       u8 intsrc_mask;
-
-       mutex_init(&max77693->irqlock);
-
-       /* Mask individual interrupt sources */
-       for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
-               struct regmap *map;
-               /* MUIC IRQ  0:MASK 1:NOT MASK */
-               /* Other IRQ 1:MASK 0:NOT MASK */
-               if (i >= MUIC_INT1 && i <= MUIC_INT3) {
-                       max77693->irq_masks_cur[i] = 0x00;
-                       max77693->irq_masks_cache[i] = 0x00;
-               } else {
-                       max77693->irq_masks_cur[i] = 0xff;
-                       max77693->irq_masks_cache[i] = 0xff;
-               }
-               map = max77693_get_regmap(max77693, i);
-
-               if (IS_ERR_OR_NULL(map))
-                       continue;
-               if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
-                       continue;
-               if (i >= MUIC_INT1 && i <= MUIC_INT3)
-                       max77693_write_reg(map, max77693_mask_reg[i], 0x00);
-               else
-                       max77693_write_reg(map, max77693_mask_reg[i], 0xff);
-       }
-
-       domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
-                                       &max77693_irq_domain_ops, max77693);
-       if (!domain) {
-               dev_err(max77693->dev, "could not create irq domain\n");
-               ret = -ENODEV;
-               goto err_irq;
-       }
-       max77693->irq_domain = domain;
-
-       /* Unmask max77693 interrupt */
-       ret = max77693_read_reg(max77693->regmap,
-                       MAX77693_PMIC_REG_INTSRC_MASK, &intsrc_mask);
-       if (ret < 0) {
-               dev_err(max77693->dev, "fail to read PMIC register\n");
-               goto err_irq;
-       }
-
-       intsrc_mask &= ~(MAX77693_IRQSRC_CHG);
-       intsrc_mask &= ~(MAX77693_IRQSRC_FLASH);
-       intsrc_mask &= ~(MAX77693_IRQSRC_MUIC);
-       ret = max77693_write_reg(max77693->regmap,
-                       MAX77693_PMIC_REG_INTSRC_MASK, intsrc_mask);
-       if (ret < 0) {
-               dev_err(max77693->dev, "fail to write PMIC register\n");
-               goto err_irq;
-       }
-
-       ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
-                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-                                  "max77693-irq", max77693);
-       if (ret)
-               dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
-                       max77693->irq, ret);
-
-err_irq:
-       return ret;
-}
-
-void max77693_irq_exit(struct max77693_dev *max77693)
-{
-       if (max77693->irq)
-               free_irq(max77693->irq, max77693);
-}
index 7e05428..249c139 100644 (file)
@@ -49,62 +49,62 @@ static const struct mfd_cell max77693_devs[] = {
        { .name = "max77693-haptic", },
 };
 
-int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
-{
-       unsigned int val;
-       int ret;
-
-       ret = regmap_read(map, reg, &val);
-       *dest = val;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(max77693_read_reg);
-
-int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
-{
-       int ret;
-
-       ret = regmap_bulk_read(map, reg, buf, count);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(max77693_bulk_read);
-
-int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
-{
-       int ret;
-
-       ret = regmap_write(map, reg, value);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(max77693_write_reg);
-
-int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
-{
-       int ret;
+static const struct regmap_config max77693_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = MAX77693_PMIC_REG_END,
+};
 
-       ret = regmap_bulk_write(map, reg, buf, count);
+static const struct regmap_irq max77693_led_irqs[] = {
+       { .mask = LED_IRQ_FLED2_OPEN,  },
+       { .mask = LED_IRQ_FLED2_SHORT, },
+       { .mask = LED_IRQ_FLED1_OPEN,  },
+       { .mask = LED_IRQ_FLED1_SHORT, },
+       { .mask = LED_IRQ_MAX_FLASH,   },
+};
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(max77693_bulk_write);
+static const struct regmap_irq_chip max77693_led_irq_chip = {
+       .name                   = "max77693-led",
+       .status_base            = MAX77693_LED_REG_FLASH_INT,
+       .mask_base              = MAX77693_LED_REG_FLASH_INT_MASK,
+       .mask_invert            = false,
+       .num_regs               = 1,
+       .irqs                   = max77693_led_irqs,
+       .num_irqs               = ARRAY_SIZE(max77693_led_irqs),
+};
 
-int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
-{
-       int ret;
+static const struct regmap_irq max77693_topsys_irqs[] = {
+       { .mask = TOPSYS_IRQ_T120C_INT,  },
+       { .mask = TOPSYS_IRQ_T140C_INT,  },
+       { .mask = TOPSYS_IRQ_LOWSYS_INT, },
+};
 
-       ret = regmap_update_bits(map, reg, mask, val);
+static const struct regmap_irq_chip max77693_topsys_irq_chip = {
+       .name                   = "max77693-topsys",
+       .status_base            = MAX77693_PMIC_REG_TOPSYS_INT,
+       .mask_base              = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+       .mask_invert            = false,
+       .num_regs               = 1,
+       .irqs                   = max77693_topsys_irqs,
+       .num_irqs               = ARRAY_SIZE(max77693_topsys_irqs),
+};
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(max77693_update_reg);
+static const struct regmap_irq max77693_charger_irqs[] = {
+       { .mask = CHG_IRQ_BYP_I,   },
+       { .mask = CHG_IRQ_THM_I,   },
+       { .mask = CHG_IRQ_BAT_I,   },
+       { .mask = CHG_IRQ_CHG_I,   },
+       { .mask = CHG_IRQ_CHGIN_I, },
+};
 
-static const struct regmap_config max77693_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 8,
-       .max_register = MAX77693_PMIC_REG_END,
+static const struct regmap_irq_chip max77693_charger_irq_chip = {
+       .name                   = "max77693-charger",
+       .status_base            = MAX77693_CHG_REG_CHG_INT,
+       .mask_base              = MAX77693_CHG_REG_CHG_INT_MASK,
+       .mask_invert            = false,
+       .num_regs               = 1,
+       .irqs                   = max77693_charger_irqs,
+       .num_irqs               = ARRAY_SIZE(max77693_charger_irqs),
 };
 
 static const struct regmap_config max77693_regmap_muic_config = {
@@ -113,11 +113,42 @@ static const struct regmap_config max77693_regmap_muic_config = {
        .max_register = MAX77693_MUIC_REG_END,
 };
 
+static const struct regmap_irq max77693_muic_irqs[] = {
+       { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC,           },
+       { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC_LOW,       },
+       { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC_ERR,       },
+       { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC1K,         },
+
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_CHGTYP,        },
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_CHGDETREUN,    },
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_DCDTMR,        },
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_DXOVP,         },
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_VBVOLT,        },
+       { .reg_offset = 1, .mask = MUIC_IRQ_INT2_VIDRM,         },
+
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_EOC,           },
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_CGMBC,         },
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_OVP,           },
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_MBCCHG_ERR,    },
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_CHG_ENABLED,   },
+       { .reg_offset = 2, .mask = MUIC_IRQ_INT3_BAT_DET,       },
+};
+
+static const struct regmap_irq_chip max77693_muic_irq_chip = {
+       .name                   = "max77693-muic",
+       .status_base            = MAX77693_MUIC_REG_INT1,
+       .mask_base              = MAX77693_MUIC_REG_INTMASK1,
+       .mask_invert            = true,
+       .num_regs               = 3,
+       .irqs                   = max77693_muic_irqs,
+       .num_irqs               = ARRAY_SIZE(max77693_muic_irqs),
+};
+
 static int max77693_i2c_probe(struct i2c_client *i2c,
                              const struct i2c_device_id *id)
 {
        struct max77693_dev *max77693;
-       u8 reg_data;
+       unsigned int reg_data;
        int ret = 0;
 
        max77693 = devm_kzalloc(&i2c->dev,
@@ -139,7 +170,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
-       ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
+       ret = regmap_read(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
                                &reg_data);
        if (ret < 0) {
                dev_err(max77693->dev, "device not found on this channel\n");
@@ -176,9 +207,45 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_regmap_muic;
        }
 
-       ret = max77693_irq_init(max77693);
-       if (ret < 0)
-               goto err_irq;
+       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+                               IRQF_ONESHOT | IRQF_SHARED |
+                               IRQF_TRIGGER_FALLING, 0,
+                               &max77693_led_irq_chip,
+                               &max77693->irq_data_led);
+       if (ret) {
+               dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
+               goto err_regmap_muic;
+       }
+
+       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+                               IRQF_ONESHOT | IRQF_SHARED |
+                               IRQF_TRIGGER_FALLING, 0,
+                               &max77693_topsys_irq_chip,
+                               &max77693->irq_data_topsys);
+       if (ret) {
+               dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
+               goto err_irq_topsys;
+       }
+
+       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+                               IRQF_ONESHOT | IRQF_SHARED |
+                               IRQF_TRIGGER_FALLING, 0,
+                               &max77693_charger_irq_chip,
+                               &max77693->irq_data_charger);
+       if (ret) {
+               dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
+               goto err_irq_charger;
+       }
+
+       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+                               IRQF_ONESHOT | IRQF_SHARED |
+                               IRQF_TRIGGER_FALLING, 0,
+                               &max77693_muic_irq_chip,
+                               &max77693->irq_data_muic);
+       if (ret) {
+               dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
+               goto err_irq_muic;
+       }
 
        pm_runtime_set_active(max77693->dev);
 
@@ -190,8 +257,14 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
        return ret;
 
 err_mfd:
-       max77693_irq_exit(max77693);
-err_irq:
+       mfd_remove_devices(max77693->dev);
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
+err_irq_muic:
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+err_irq_charger:
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
+err_irq_topsys:
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
 err_regmap_muic:
        i2c_unregister_device(max77693->haptic);
 err_i2c_haptic:
@@ -204,7 +277,12 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
        struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
 
        mfd_remove_devices(max77693->dev);
-       max77693_irq_exit(max77693);
+
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
+       regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
+
        i2c_unregister_device(max77693->muic);
        i2c_unregister_device(max77693->haptic);
 
@@ -222,8 +300,11 @@ static int max77693_suspend(struct device *dev)
        struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
        struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
 
-       if (device_may_wakeup(dev))
-               irq_set_irq_wake(max77693->irq, 1);
+       if (device_may_wakeup(dev)) {
+               enable_irq_wake(max77693->irq);
+               disable_irq(max77693->irq);
+       }
+
        return 0;
 }
 
@@ -232,9 +313,12 @@ static int max77693_resume(struct device *dev)
        struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
        struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
 
-       if (device_may_wakeup(dev))
-               irq_set_irq_wake(max77693->irq, 0);
-       return max77693_irq_resume(max77693);
+       if (device_may_wakeup(dev)) {
+               disable_irq_wake(max77693->irq);
+               enable_irq(max77693->irq);
+       }
+
+       return 0;
 }
 
 static const struct dev_pm_ops max77693_pm = {
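
With the hand-rolled max77693-irq.c removed, interrupt demultiplexing is handled by the generic regmap-irq core registered above. A hedged sketch of how a child driver might map one of those regmap_irq entries to a Linux IRQ; the function and parameter names are illustrative, only the regmap and IRQ calls are real API:

#include <linux/interrupt.h>
#include <linux/regmap.h>

/*
 * irq_data is the regmap_irq_chip_data handle returned by
 * regmap_add_irq_chip(); index selects an entry of the chip's .irqs[].
 */
static int example_request_regmap_irq(struct regmap_irq_chip_data *irq_data,
				      int index, irq_handler_t thread_fn,
				      void *ctx)
{
	int virq = regmap_irq_get_virq(irq_data, index);

	if (virq <= 0)
		return -EINVAL;

	return request_threaded_irq(virq, NULL, thread_fn,
				    IRQF_ONESHOT, "example-regmap-irq", ctx);
}
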
index 99a0468..7b55f8a 100644 (file)
@@ -1185,7 +1185,7 @@ static int bh1770_probe(struct i2c_client *client,
        struct bh1770_chip *chip;
        int err;
 
-       chip = kzalloc(sizeof *chip, GFP_KERNEL);
+       chip = devm_kzalloc(&client->dev, sizeof *chip, GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
 
@@ -1198,8 +1198,7 @@ static int bh1770_probe(struct i2c_client *client,
 
        if (client->dev.platform_data == NULL) {
                dev_err(&client->dev, "platform data is mandatory\n");
-               err = -EINVAL;
-               goto fail1;
+               return -EINVAL;
        }
 
        chip->pdata             = client->dev.platform_data;
@@ -1224,24 +1223,24 @@ static int bh1770_probe(struct i2c_client *client,
        chip->regs[0].supply = reg_vcc;
        chip->regs[1].supply = reg_vleds;
 
-       err = regulator_bulk_get(&client->dev,
-                                ARRAY_SIZE(chip->regs), chip->regs);
+       err = devm_regulator_bulk_get(&client->dev,
+                                     ARRAY_SIZE(chip->regs), chip->regs);
        if (err < 0) {
                dev_err(&client->dev, "Cannot get regulators\n");
-               goto fail1;
+               return err;
        }
 
        err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
                                chip->regs);
        if (err < 0) {
                dev_err(&client->dev, "Cannot enable regulators\n");
-               goto fail2;
+               return err;
        }
 
        usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
        err = bh1770_detect(chip);
        if (err < 0)
-               goto fail3;
+               goto fail0;
 
        /* Start chip */
        bh1770_chip_on(chip);
@@ -1252,14 +1251,14 @@ static int bh1770_probe(struct i2c_client *client,
        if (chip->lux_corr == 0) {
                dev_err(&client->dev, "Improper correction values\n");
                err = -EINVAL;
-               goto fail3;
+               goto fail0;
        }
 
        if (chip->pdata->setup_resources) {
                err = chip->pdata->setup_resources();
                if (err) {
                        err = -EINVAL;
-                       goto fail3;
+                       goto fail0;
                }
        }
 
@@ -1267,7 +1266,7 @@ static int bh1770_probe(struct i2c_client *client,
                                &bh1770_attribute_group);
        if (err < 0) {
                dev_err(&chip->client->dev, "Sysfs registration failed\n");
-               goto fail4;
+               goto fail1;
        }
 
        /*
@@ -1283,22 +1282,18 @@ static int bh1770_probe(struct i2c_client *client,
        if (err) {
                dev_err(&client->dev, "could not get IRQ %d\n",
                        client->irq);
-               goto fail5;
+               goto fail2;
        }
        regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
        return err;
-fail5:
+fail2:
        sysfs_remove_group(&chip->client->dev.kobj,
                        &bh1770_attribute_group);
-fail4:
+fail1:
        if (chip->pdata->release_resources)
                chip->pdata->release_resources();
-fail3:
+fail0:
        regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
-fail2:
-       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
-fail1:
-       kfree(chip);
        return err;
 }
 
@@ -1322,8 +1317,6 @@ static int bh1770_remove(struct i2c_client *client)
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
 
-       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
-       kfree(chip);
        return 0;
 }
 
index 48ea33d..4c4a59b 100644 (file)
@@ -149,50 +149,35 @@ static int bh1780_probe(struct i2c_client *client,
                                                const struct i2c_device_id *id)
 {
        int ret;
-       struct bh1780_data *ddata = NULL;
+       struct bh1780_data *ddata;
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
-               ret = -EIO;
-               goto err_op_failed;
-       }
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+               return -EIO;
 
-       ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
-       if (ddata == NULL) {
-               ret = -ENOMEM;
-               goto err_op_failed;
-       }
+       ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
+                            GFP_KERNEL);
+       if (ddata == NULL)
+               return -ENOMEM;
 
        ddata->client = client;
        i2c_set_clientdata(client, ddata);
 
        ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
        if (ret < 0)
-               goto err_op_failed;
+               return ret;
 
        dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
                        (ret & BH1780_REVMASK));
 
        mutex_init(&ddata->lock);
 
-       ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
-       if (ret)
-               goto err_op_failed;
-
-       return 0;
-
-err_op_failed:
-       kfree(ddata);
-       return ret;
+       return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
 }
 
 static int bh1780_remove(struct i2c_client *client)
 {
-       struct bh1780_data *ddata;
-
-       ddata = i2c_get_clientdata(client);
        sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-       kfree(ddata);
 
        return 0;
 }
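
The bh1780 and bh1770 changes lean on device-managed (devm_*) resources, so the probe error paths and remove() no longer free memory or regulators by hand. A small sketch of the pattern for a hypothetical driver, using the i2c probe prototype of this kernel version:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct example_chip {			/* hypothetical private data */
	struct i2c_client *client;
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_chip *chip;

	/* Released automatically when the device is unbound; no kfree() */
	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->client = client;
	i2c_set_clientdata(client, chip);
	return 0;
}
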
index 14d90ea..55e913b 100644 (file)
@@ -954,10 +954,7 @@ static int data_debugfs_init(struct fpga_device *priv)
 {
        priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
                                              &data_debug_fops);
-       if (IS_ERR(priv->dbg_entry))
-               return PTR_ERR(priv->dbg_entry);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(priv->dbg_entry);
 }
 
 static void data_debugfs_exit(struct fpga_device *priv)
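
PTR_ERR_OR_ZERO(), used above in place of the explicit IS_ERR()/PTR_ERR() pair, is just a convenience from <linux/err.h>; its open-coded equivalent looks roughly like this:

#include <linux/err.h>

/* Open-coded equivalent of "return PTR_ERR_OR_ZERO(ptr);" */
static int example_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}
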
index 4d0db15..acbbe03 100644 (file)
@@ -61,3 +61,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jiri Kosina");
 module_param(irq, uint, 0444);
 MODULE_PARM_DESC(irq, "The IRQ to register for");
+MODULE_DESCRIPTION("Dummy IRQ handler driver");
index 6069d8c..4c0a033 100644 (file)
@@ -11,3 +11,9 @@ menuconfig GENWQE
          Enables PCIe card driver for IBM GenWQE accelerators.
          The user-space interface is described in
          include/linux/genwqe/genwqe_card.h.
+
+config GENWQE_PLATFORM_ERROR_RECOVERY
+       int "Use platform recovery procedures (0=off, 1=on)"
+       depends on GENWQE
+       default 1 if PPC64
+       default 0
index 74d51c9..43bbabc 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/notifier.h>
 #include <linux/device.h>
 #include <linux/log2.h>
-#include <linux/genwqe/genwqe_card.h>
 
 #include "card_base.h"
 #include "card_ddcb.h"
@@ -58,7 +57,7 @@ static struct dentry *debugfs_genwqe;
 static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
 
 /* PCI structure for identifying device by PCI vendor and device ID */
-static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = {
+static const struct pci_device_id genwqe_device_table[] = {
        { .vendor      = PCI_VENDOR_ID_IBM,
          .device      = PCI_DEVICE_GENWQE,
          .subvendor   = PCI_SUBVENDOR_ID_IBM,
@@ -140,6 +139,12 @@ static struct genwqe_dev *genwqe_dev_alloc(void)
        cd->class_genwqe = class_genwqe;
        cd->debugfs_genwqe = debugfs_genwqe;
 
+       /*
+        * This comes from kernel config option and can be overritten via
+        * This comes from the kernel config option and can be overwritten via
+        */
+       cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;
+
        init_waitqueue_head(&cd->queue_waitq);
 
        spin_lock_init(&cd->file_lock);
@@ -760,6 +765,124 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd)
        return IO_ILLEGAL_VALUE;
 }
 
+/**
+ * genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
+ *
+ * Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
+ * reset method will not work in all cases.
+ *
+ * Return: 0 on success or error code from pci_set_pcie_reset_state()
+ */
+static int genwqe_pci_fundamental_reset(struct pci_dev *pci_dev)
+{
+       int rc;
+
+       /*
+        * lock pci config space access from userspace,
+        * save state and issue PCIe fundamental reset
+        */
+       pci_cfg_access_lock(pci_dev);
+       pci_save_state(pci_dev);
+       rc = pci_set_pcie_reset_state(pci_dev, pcie_warm_reset);
+       if (!rc) {
+               /* keep PCIe reset asserted for 250ms */
+               msleep(250);
+               pci_set_pcie_reset_state(pci_dev, pcie_deassert_reset);
+               /* Wait for 2s to reload flash and train the link */
+               msleep(2000);
+       }
+       pci_restore_state(pci_dev);
+       pci_cfg_access_unlock(pci_dev);
+       return rc;
+}
+
+
+static int genwqe_platform_recovery(struct genwqe_dev *cd)
+{
+       struct pci_dev *pci_dev = cd->pci_dev;
+       int rc;
+
+       dev_info(&pci_dev->dev,
+                "[%s] resetting card for error recovery\n", __func__);
+
+       /* Clear out error injection flags */
+       cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
+                           GENWQE_INJECT_GFIR_FATAL |
+                           GENWQE_INJECT_GFIR_INFO);
+
+       genwqe_stop(cd);
+
+       /* Try recovering the card with a fundamental reset */
+       rc = genwqe_pci_fundamental_reset(pci_dev);
+       if (!rc) {
+               rc = genwqe_start(cd);
+               if (!rc)
+                       dev_info(&pci_dev->dev,
+                                "[%s] card recovered\n", __func__);
+               else
+                       dev_err(&pci_dev->dev,
+                               "[%s] err: cannot start card services! (err=%d)\n",
+                               __func__, rc);
+       } else {
+               dev_err(&pci_dev->dev,
+                       "[%s] card reset failed\n", __func__);
+       }
+
+       return rc;
+}
+
+/*
+ * genwqe_reload_bistream() - reload card bitstream
+ *
+ * Set the appropriate register and call fundamental reset to reload the card
+ * bitstream.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int genwqe_reload_bistream(struct genwqe_dev *cd)
+{
+       struct pci_dev *pci_dev = cd->pci_dev;
+       int rc;
+
+       dev_info(&pci_dev->dev,
+                "[%s] resetting card for bitstream reload\n",
+                __func__);
+
+       genwqe_stop(cd);
+
+       /*
+        * Cause a CPLD reprogram with the 'next_bitstream'
+        * partition on PCIe hot or fundamental reset
+        */
+       __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
+                       (cd->softreset & 0xcull) | 0x70ull);
+
+       rc = genwqe_pci_fundamental_reset(pci_dev);
+       if (rc) {
+               /*
+                * A fundamental reset failure can be caused
+                * by lack of support on the arch, so we just
+                * log the error and try to start the card
+                * again.
+                */
+               dev_err(&pci_dev->dev,
+                       "[%s] err: failed to reset card for bitstream reload\n",
+                       __func__);
+       }
+
+       rc = genwqe_start(cd);
+       if (rc) {
+               dev_err(&pci_dev->dev,
+                       "[%s] err: cannot start card services! (err=%d)\n",
+                       __func__, rc);
+               return rc;
+       }
+       dev_info(&pci_dev->dev,
+                "[%s] card reloaded\n", __func__);
+       return 0;
+}
+
+
 /**
  * genwqe_health_thread() - Health checking thread
  *
@@ -786,6 +909,7 @@ static int genwqe_health_thread(void *data)
        struct pci_dev *pci_dev = cd->pci_dev;
        u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
 
+ health_thread_begin:
        while (!kthread_should_stop()) {
                rc = wait_event_interruptible_timeout(cd->health_waitq,
                         (genwqe_health_check_cond(cd, &gfir) ||
@@ -846,6 +970,13 @@ static int genwqe_health_thread(void *data)
                        }
                }
 
+               if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) {
+                       /* Userspace requested card bitstream reload */
+                       rc = genwqe_reload_bistream(cd);
+                       if (rc)
+                               goto fatal_error;
+               }
+
                cd->last_gfir = gfir;
                cond_resched();
        }
@@ -853,6 +984,28 @@ static int genwqe_health_thread(void *data)
        return 0;
 
  fatal_error:
+       if (cd->use_platform_recovery) {
+               /*
+                * Since we use raw accessors, EEH errors won't be detected
+                * by the platform until we do a non-raw MMIO or config space
+                * read
+                */
+               readq(cd->mmio + IO_SLC_CFGREG_GFIR);
+
+               /* We do nothing if the card is going through PCI recovery */
+               if (pci_channel_offline(pci_dev))
+                       return -EIO;
+
+               /*
+                * If it's supported by the platform, we try a fundamental reset
+                * to recover from a fatal error. Otherwise, we continue to wait
+                * for an external recovery procedure to take care of it.
+                */
+               rc = genwqe_platform_recovery(cd);
+               if (!rc)
+                       goto health_thread_begin;
+       }
+
        dev_err(&pci_dev->dev,
                "[%s] card unusable. Please trigger unbind!\n", __func__);
 
@@ -958,6 +1111,9 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
        pci_set_master(pci_dev);
        pci_enable_pcie_error_reporting(pci_dev);
 
+       /* EEH recovery requires PCIe fundamental reset */
+       pci_dev->needs_freset = 1;
+
        /* request complete BAR-0 space (length = 0) */
        cd->mmio_len = pci_resource_len(pci_dev, 0);
        cd->mmio = pci_iomap(pci_dev, 0, 0);
@@ -1096,23 +1252,40 @@ static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
 
        dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
 
-       if (pci_dev == NULL)
-               return PCI_ERS_RESULT_NEED_RESET;
-
        cd = dev_get_drvdata(&pci_dev->dev);
        if (cd == NULL)
-               return PCI_ERS_RESULT_NEED_RESET;
+               return PCI_ERS_RESULT_DISCONNECT;
 
-       switch (state) {
-       case pci_channel_io_normal:
-               return PCI_ERS_RESULT_CAN_RECOVER;
-       case pci_channel_io_frozen:
-               return PCI_ERS_RESULT_NEED_RESET;
-       case pci_channel_io_perm_failure:
+       /* Stop the card */
+       genwqe_health_check_stop(cd);
+       genwqe_stop(cd);
+
+       /*
+        * On permanent failure, the PCI code will call device remove
+        * after the return of this function.
+        * genwqe_stop() can be called twice.
+        */
+       if (state == pci_channel_io_perm_failure) {
                return PCI_ERS_RESULT_DISCONNECT;
+       } else {
+               genwqe_pci_remove(cd);
+               return PCI_ERS_RESULT_NEED_RESET;
        }
+}
+
+static pci_ers_result_t genwqe_err_slot_reset(struct pci_dev *pci_dev)
+{
+       int rc;
+       struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
 
-       return PCI_ERS_RESULT_NEED_RESET;
+       rc = genwqe_pci_setup(cd);
+       if (!rc) {
+               return PCI_ERS_RESULT_RECOVERED;
+       } else {
+               dev_err(&pci_dev->dev,
+                       "err: problems with PCI setup (err=%d)\n", rc);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
 }
 
 static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
@@ -1120,8 +1293,22 @@ static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
        return PCI_ERS_RESULT_NONE;
 }
 
-static void genwqe_err_resume(struct pci_dev *dev)
+static void genwqe_err_resume(struct pci_dev *pci_dev)
 {
+       int rc;
+       struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
+
+       rc = genwqe_start(cd);
+       if (!rc) {
+               rc = genwqe_health_check_start(cd);
+               if (rc)
+                       dev_err(&pci_dev->dev,
+                               "err: cannot start health checking! (err=%d)\n",
+                               rc);
+       } else {
+               dev_err(&pci_dev->dev,
+                       "err: cannot start card services! (err=%d)\n", rc);
+       }
 }
 
 static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
@@ -1144,7 +1331,7 @@ static struct pci_error_handlers genwqe_err_handler = {
        .error_detected = genwqe_err_error_detected,
        .mmio_enabled   = genwqe_err_result_none,
        .link_reset     = genwqe_err_result_none,
-       .slot_reset     = genwqe_err_result_none,
+       .slot_reset     = genwqe_err_slot_reset,
        .resume         = genwqe_err_resume,
 };
 
index 0e608a2..67abd8c 100644 (file)
@@ -291,6 +291,8 @@ struct genwqe_dev {
        struct task_struct *health_thread;
        wait_queue_head_t health_waitq;
 
+       int use_platform_recovery;      /* use platform recovery mechanisms */
+
        /* char device */
        dev_t  devnum_genwqe;           /* major/minor num card */
        struct class *class_genwqe;     /* reference to class object */
index c8046db..dc9851a 100644 (file)
@@ -1118,7 +1118,21 @@ static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
         * safer, but slower for the good-case ... See above.
         */
        gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
-       if ((gfir & GFIR_ERR_TRIGGER) != 0x0) {
+       if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
+           !pci_channel_offline(pci_dev)) {
+
+               if (cd->use_platform_recovery) {
+                       /*
+                        * Since we use raw accessors, EEH errors won't be
+                        * detected by the platform until we do a non-raw
+                        * MMIO or config space read
+                        */
+                       readq(cd->mmio + IO_SLC_CFGREG_GFIR);
+
+                       /* Don't do anything if the PCI channel is frozen */
+                       if (pci_channel_offline(pci_dev))
+                               goto exit;
+               }
 
                wake_up_interruptible(&cd->health_waitq);
 
@@ -1126,12 +1140,12 @@ static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
                 * By default GFIRs causes recovery actions. This
                 * count is just for debug when recovery is masked.
                 */
-               printk_ratelimited(KERN_ERR
-                                  "%s %s: [%s] GFIR=%016llx\n",
-                                  GENWQE_DEVNAME, dev_name(&pci_dev->dev),
-                                  __func__, gfir);
+               dev_err_ratelimited(&pci_dev->dev,
+                                   "[%s] GFIR=%016llx\n",
+                                   __func__, gfir);
        }
 
+ exit:
        return IRQ_HANDLED;
 }
 
@@ -1237,9 +1251,7 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd)
        }
 
        rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
-       if (rc > 0)
-               rc = genwqe_set_interrupt_capability(cd, rc);
-       if (rc != 0) {
+       if (rc) {
                rc = -ENODEV;
                goto stop_kthread;
        }
index 0a33ade..c9b4d6d 100644 (file)
@@ -485,6 +485,13 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
                goto err1;
        }
 
+       file = debugfs_create_u32("use_platform_recovery", 0666, root,
+                                 &cd->use_platform_recovery);
+       if (!file) {
+               ret = -ENOMEM;
+               goto err1;
+       }
+
        cd->debugfs_root = root;
        return 0;
 err1:
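
The new use_platform_recovery knob is a plain u32 exposed through debugfs_create_u32(), so it can be flipped at runtime from /sys/kernel/debug. A hedged sketch with illustrative names:

#include <linux/debugfs.h>

static u32 example_use_platform_recovery = 1;	/* runtime-tunable flag */
static struct dentry *example_debugfs_root;

static int example_debugfs_init(void)
{
	example_debugfs_root = debugfs_create_dir("example", NULL);
	if (!example_debugfs_root)
		return -ENOMEM;

	/* 0666: visible and writable under /sys/kernel/debug/example/ */
	debugfs_create_u32("use_platform_recovery", 0666,
			   example_debugfs_root,
			   &example_use_platform_recovery);
	return 0;
}
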
index 1d2f163..aae4255 100644 (file)
@@ -1048,10 +1048,15 @@ static long genwqe_ioctl(struct file *filp, unsigned int cmd,
        int rc = 0;
        struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
        struct genwqe_dev *cd = cfile->cd;
+       struct pci_dev *pci_dev = cd->pci_dev;
        struct genwqe_reg_io __user *io;
        u64 val;
        u32 reg_offs;
 
+       /* Return -EIO if card hit EEH */
+       if (pci_channel_offline(pci_dev))
+               return -EIO;
+
        if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
                return -EINVAL;
 
index a72a992..7232e40 100644 (file)
@@ -223,6 +223,30 @@ static ssize_t next_bitstream_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(next_bitstream);
 
+static ssize_t reload_bitstream_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       int reload;
+       struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+       if (kstrtoint(buf, 0, &reload) < 0)
+               return -EINVAL;
+
+       if (reload == 0x1) {
+               if (cd->card_state == GENWQE_CARD_UNUSED ||
+                   cd->card_state == GENWQE_CARD_USED)
+                       cd->card_state = GENWQE_CARD_RELOAD_BITSTREAM;
+               else
+                       return -EIO;
+       } else {
+               return -EINVAL;
+       }
+
+       return count;
+}
+static DEVICE_ATTR_WO(reload_bitstream);
+
 /*
  * Create device_attribute structures / params: name, mode, show, store
  * additional flag if valid in VF
@@ -239,6 +263,7 @@ static struct attribute *genwqe_attributes[] = {
        &dev_attr_status.attr,
        &dev_attr_freerunning_timer.attr,
        &dev_attr_queue_working_time.attr,
+       &dev_attr_reload_bitstream.attr,
        NULL,
 };
 
index 62cc6bb..a6400f0 100644 (file)
  */
 int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
 {
+       struct pci_dev *pci_dev = cd->pci_dev;
+
        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return -EIO;
 
        if (cd->mmio == NULL)
                return -EIO;
 
+       if (pci_channel_offline(pci_dev))
+               return -EIO;
+
        __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
        return 0;
 }
@@ -99,12 +104,17 @@ u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
  */
 int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
 {
+       struct pci_dev *pci_dev = cd->pci_dev;
+
        if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
                return -EIO;
 
        if (cd->mmio == NULL)
                return -EIO;
 
+       if (pci_channel_offline(pci_dev))
+               return -EIO;
+
        __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
        return 0;
 }
@@ -718,10 +728,12 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
        int rc;
        struct pci_dev *pci_dev = cd->pci_dev;
 
-       rc = pci_enable_msi_exact(pci_dev, count);
-       if (rc == 0)
-               cd->flags |= GENWQE_FLAG_MSI_ENABLED;
-       return rc;
+       rc = pci_enable_msi_range(pci_dev, 1, count);
+       if (rc < 0)
+               return rc;
+
+       cd->flags |= GENWQE_FLAG_MSI_ENABLED;
+       return 0;
 }
 
 /**
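
genwqe_set_interrupt_capability() now uses pci_enable_msi_range(), which grants anywhere between minvec and maxvec vectors and returns the count actually allocated, or a negative errno. A short sketch of a caller that accepts any number of vectors (names are illustrative):

#include <linux/pci.h>

static int example_enable_msi(struct pci_dev *pdev, int wanted)
{
	int nvec;

	/* Ask for up to 'wanted' MSI vectors but accept as few as one */
	nvec = pci_enable_msi_range(pdev, 1, wanted);
	if (nvec < 0)
		return nvec;	/* not even a single vector available */

	return nvec;		/* number of vectors actually granted */
}
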
index cd52631..a506e9a 100644 (file)
@@ -36,7 +36,7 @@
 #include <asm/byteorder.h>
 #include <linux/genwqe/genwqe_card.h>
 
-#define DRV_VERS_STRING                "2.0.15"
+#define DRV_VERS_STRING                "2.0.21"
 
 /*
  * Static minor number assignement, until we decide/implement
index 0a1565e..7ffdb58 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/spi/spi.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <asm/unaligned.h>
 
 #define FIRMWARE_NAME  "lattice-ecp3.bit"
 
@@ -91,8 +92,8 @@ static void firmware_load(const struct firmware *fw, void *context)
        /* Trying to speak with the FPGA via SPI... */
        txbuf[0] = FPGA_CMD_READ_ID;
        ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
-       dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
-       jedec_id = *(u32 *)&rxbuf[4];
+       jedec_id = get_unaligned_be32(&rxbuf[4]);
+       dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
 
        for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
                if (jedec_id == ecp3_dev[i].jedec_id)
@@ -109,7 +110,8 @@ static void firmware_load(const struct firmware *fw, void *context)
 
        txbuf[0] = FPGA_CMD_READ_STATUS;
        ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
-       dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+       status = get_unaligned_be32(&rxbuf[4]);
+       dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
 
        buffer = kzalloc(fw->size + 8, GFP_KERNEL);
        if (!buffer) {
@@ -141,7 +143,7 @@ static void firmware_load(const struct firmware *fw, void *context)
        for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
                txbuf[0] = FPGA_CMD_READ_STATUS;
                ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
-               status = *(u32 *)&rxbuf[4];
+               status = get_unaligned_be32(&rxbuf[4]);
                if (status == FPGA_STATUS_CLEARED)
                        break;
 
@@ -164,8 +166,8 @@ static void firmware_load(const struct firmware *fw, void *context)
 
        txbuf[0] = FPGA_CMD_READ_STATUS;
        ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
-       dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
-       status = *(u32 *)&rxbuf[4];
+       status = get_unaligned_be32(&rxbuf[4]);
+       dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
 
        /* Check result */
        if (status & FPGA_STATUS_DONE)
@@ -196,7 +198,7 @@ static int lattice_ecp3_probe(struct spi_device *spi)
        spi_set_drvdata(spi, data);
 
        init_completion(&data->fw_loaded);
-       err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+       err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                      FIRMWARE_NAME, &spi->dev,
                                      GFP_KERNEL, spi, firmware_load);
        if (err) {
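
The lattice-ecp3 hunks replace raw u32 casts of the SPI receive buffer with get_unaligned_be32(), which is safe on architectures that fault on unaligned loads and makes the big-endian byte order explicit. A tiny illustrative sketch:

#include <asm/unaligned.h>
#include <linux/types.h>

static u32 example_parse_be32(const u8 *rxbuf)
{
	/* Bytes 4..7 of the response carry a big-endian 32-bit word */
	return get_unaligned_be32(&rxbuf[4]);
}
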
index d66a2f2..b5abe34 100644 (file)
@@ -870,3 +870,4 @@ module_init(lkdtm_module_init);
 module_exit(lkdtm_module_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kprobe module for testing crash dumps");
index 59d20c5..324e1de 100644 (file)
@@ -459,7 +459,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
 {
        struct mei_device *dev;
        struct mei_cl_cb *cb;
-       int rets, err;
+       int rets;
 
        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;
@@ -491,6 +491,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
                        cl_err(dev, cl, "failed to disconnect.\n");
                        goto free;
                }
+               cl->timer_count = MEI_CONNECT_TIMEOUT;
                mdelay(10); /* Wait for hardware disconnection ready */
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
@@ -500,23 +501,18 @@ int mei_cl_disconnect(struct mei_cl *cl)
        }
        mutex_unlock(&dev->device_lock);
 
-       err = wait_event_timeout(dev->wait_recvd_msg,
+       wait_event_timeout(dev->wait_recvd_msg,
                        MEI_FILE_DISCONNECTED == cl->state,
                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 
        mutex_lock(&dev->device_lock);
+
        if (MEI_FILE_DISCONNECTED == cl->state) {
                rets = 0;
                cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
        } else {
-               rets = -ENODEV;
-               if (MEI_FILE_DISCONNECTED != cl->state)
-                       cl_err(dev, cl, "wrong status client disconnect.\n");
-
-               if (err)
-                       cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);
-
-               cl_err(dev, cl, "failed to disconnect from FW client.\n");
+               cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
+               rets = -ETIME;
        }
 
        mei_io_list_flush(&dev->ctrl_rd_list, cl);
@@ -616,6 +612,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
        mutex_lock(&dev->device_lock);
 
        if (cl->state != MEI_FILE_CONNECTED) {
+               cl->state = MEI_FILE_DISCONNECTED;
                /* something went really wrong */
                if (!cl->status)
                        cl->status = -EFAULT;
index a7856c0..c5feafd 100644 (file)
 #define MEI_DEV_ID_LPT_HR     0x8CBA  /* Lynx Point H Refresh */
 
 #define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */
+#define MEI_DEV_ID_WPT_LP_2   0x9CBB  /* Wildcat Point LP 2 */
 
 /* Host Firmware Status Registers in PCI Config Space */
 #define PCI_CFG_HFS_1         0x40
index 6a2d272..a9a0d08 100644 (file)
@@ -710,64 +710,10 @@ end:
        return IRQ_HANDLED;
 }
 
-/**
- * mei_me_fw_status - retrieve fw status from the pci config space
- *
- * @dev: the device structure
- * @fw_status: fw status registers storage
- *
- * returns 0 on success an error code otherwise
- */
-static int mei_me_fw_status(struct mei_device *dev,
-                           struct mei_fw_status *fw_status)
-{
-       const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
-       int i;
-
-       if (!fw_status)
-               return -EINVAL;
-
-       switch (dev->pdev->device) {
-       case MEI_DEV_ID_IBXPK_1:
-       case MEI_DEV_ID_IBXPK_2:
-       case MEI_DEV_ID_CPT_1:
-       case MEI_DEV_ID_PBG_1:
-       case MEI_DEV_ID_PPT_1:
-       case MEI_DEV_ID_PPT_2:
-       case MEI_DEV_ID_PPT_3:
-       case MEI_DEV_ID_LPT_H:
-       case MEI_DEV_ID_LPT_W:
-       case MEI_DEV_ID_LPT_LP:
-       case MEI_DEV_ID_LPT_HR:
-       case MEI_DEV_ID_WPT_LP:
-               fw_status->count = 2;
-               break;
-       case MEI_DEV_ID_ICH10_1:
-       case MEI_DEV_ID_ICH10_2:
-       case MEI_DEV_ID_ICH10_3:
-       case MEI_DEV_ID_ICH10_4:
-               fw_status->count = 1;
-               break;
-       default:
-               fw_status->count = 0;
-               break;
-       }
-
-       for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
-               int ret;
-               ret = pci_read_config_dword(dev->pdev,
-                               pci_cfg_reg[i], &fw_status->status[i]);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 static const struct mei_hw_ops mei_me_hw_ops = {
 
        .pg_state  = mei_me_pg_state,
 
-       .fw_status = mei_me_fw_status,
        .host_is_ready = mei_me_host_is_ready,
 
        .hw_is_ready = mei_me_hw_is_ready,
index 9327378..f1cd166 100644 (file)
@@ -1042,40 +1042,8 @@ end:
        return IRQ_HANDLED;
 }
 
-
-/**
- * mei_txe_fw_status - retrieve fw status from the pci config space
- *
- * @dev: the device structure
- * @fw_status: fw status registers storage
- *
- * returns: 0 on success an error code otherwise
- */
-static int mei_txe_fw_status(struct mei_device *dev,
-                            struct mei_fw_status *fw_status)
-{
-       const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1};
-       int i;
-
-       if (!fw_status)
-               return -EINVAL;
-
-       fw_status->count = 2;
-
-       for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
-               int ret;
-               ret = pci_read_config_dword(dev->pdev,
-                               pci_cfg_reg[i], &fw_status->status[i]);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static const struct mei_hw_ops mei_txe_hw_ops = {
 
-       .fw_status = mei_txe_fw_status,
        .host_is_ready = mei_txe_host_is_ready,
 
        .pg_state = mei_txe_pg_state,
index 66f0a1a..401a3d5 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/compat.h>
 #include <linux/jiffies.h>
 #include <linux/interrupt.h>
-#include <linux/miscdevice.h>
 
 #include <linux/mei.h>
 
  */
 static int mei_open(struct inode *inode, struct file *file)
 {
-       struct miscdevice *misc = file->private_data;
-       struct pci_dev *pdev;
-       struct mei_cl *cl;
        struct mei_device *dev;
+       struct mei_cl *cl;
 
        int err;
 
-       if (!misc->parent)
-               return -ENODEV;
-
-       pdev = container_of(misc->parent, struct pci_dev, dev);
-
-       dev = pci_get_drvdata(pdev);
+       dev = container_of(inode->i_cdev, struct mei_device, cdev);
        if (!dev)
                return -ENODEV;
 
@@ -667,46 +659,148 @@ static const struct file_operations mei_fops = {
        .llseek = no_llseek
 };
 
-/*
- * Misc Device Struct
+static struct class *mei_class;
+static dev_t mei_devt;
+#define MEI_MAX_DEVS  MINORMASK
+static DEFINE_MUTEX(mei_minor_lock);
+static DEFINE_IDR(mei_idr);
+
+/**
+ * mei_minor_get - obtain next free device minor number
+ *
+ * @dev:  device pointer
+ *
+ * returns allocated minor, or -ENOSPC if no free minor is left
  */
-static struct miscdevice  mei_misc_device = {
-               .name = "mei",
-               .fops = &mei_fops,
-               .minor = MISC_DYNAMIC_MINOR,
-};
+static int mei_minor_get(struct mei_device *dev)
+{
+       int ret;
+
+       mutex_lock(&mei_minor_lock);
+       ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
+       if (ret >= 0)
+               dev->minor = ret;
+       else if (ret == -ENOSPC)
+               dev_err(&dev->pdev->dev, "too many mei devices\n");
 
+       mutex_unlock(&mei_minor_lock);
+       return ret;
+}
 
-int mei_register(struct mei_device *dev)
+/**
+ * mei_minor_free - mark device minor number as free
+ *
+ * @dev:  device pointer
+ */
+static void mei_minor_free(struct mei_device *dev)
 {
-       int ret;
-       mei_misc_device.parent = &dev->pdev->dev;
-       ret = misc_register(&mei_misc_device);
-       if (ret)
+       mutex_lock(&mei_minor_lock);
+       idr_remove(&mei_idr, dev->minor);
+       mutex_unlock(&mei_minor_lock);
+}
+
+int mei_register(struct mei_device *dev, struct device *parent)
+{
+       struct device *clsdev; /* class device */
+       int ret, devno;
+
+       ret = mei_minor_get(dev);
+       if (ret < 0)
                return ret;
 
-       if (mei_dbgfs_register(dev, mei_misc_device.name))
-               dev_err(&dev->pdev->dev, "cannot register debugfs\n");
+       /* Fill in the data structures */
+       devno = MKDEV(MAJOR(mei_devt), dev->minor);
+       cdev_init(&dev->cdev, &mei_fops);
+       dev->cdev.owner = mei_fops.owner;
+
+       /* Add the device */
+       ret = cdev_add(&dev->cdev, devno, 1);
+       if (ret) {
+               dev_err(parent, "unable to add device %d:%d\n",
+                       MAJOR(mei_devt), dev->minor);
+               goto err_dev_add;
+       }
+
+       clsdev = device_create(mei_class, parent, devno,
+                        NULL, "mei%d", dev->minor);
+
+       if (IS_ERR(clsdev)) {
+               dev_err(parent, "unable to create device %d:%d\n",
+                       MAJOR(mei_devt), dev->minor);
+               ret = PTR_ERR(clsdev);
+               goto err_dev_create;
+       }
+
+       ret = mei_dbgfs_register(dev, dev_name(clsdev));
+       if (ret) {
+               dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
+               goto err_dev_dbgfs;
+       }
 
        return 0;
+
+err_dev_dbgfs:
+       device_destroy(mei_class, devno);
+err_dev_create:
+       cdev_del(&dev->cdev);
+err_dev_add:
+       mei_minor_free(dev);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(mei_register);
 
 void mei_deregister(struct mei_device *dev)
 {
+       int devno;
+
+       devno = dev->cdev.dev;
+       cdev_del(&dev->cdev);
+
        mei_dbgfs_deregister(dev);
-       misc_deregister(&mei_misc_device);
-       mei_misc_device.parent = NULL;
+
+       device_destroy(mei_class, devno);
+
+       mei_minor_free(dev);
 }
 EXPORT_SYMBOL_GPL(mei_deregister);
 
 static int __init mei_init(void)
 {
-       return mei_cl_bus_init();
+       int ret;
+
+       mei_class = class_create(THIS_MODULE, "mei");
+       if (IS_ERR(mei_class)) {
+               pr_err("couldn't create class\n");
+               ret = PTR_ERR(mei_class);
+               goto err;
+       }
+
+       ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
+       if (ret < 0) {
+               pr_err("unable to allocate char dev region\n");
+               goto err_class;
+       }
+
+       ret = mei_cl_bus_init();
+       if (ret < 0) {
+               pr_err("unable to initialize bus\n");
+               goto err_chrdev;
+       }
+
+       return 0;
+
+err_chrdev:
+       unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
+err_class:
+       class_destroy(mei_class);
+err:
+       return ret;
 }
 
 static void __exit mei_exit(void)
 {
+       unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
+       class_destroy(mei_class);
        mei_cl_bus_exit();
 }
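
A minimal, self-contained sketch of the class + cdev registration pattern that
mei_register()/mei_init() switch to above; it is not part of the patch, and
all demo_* names are hypothetical placeholders.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>

#define DEMO_MAX_DEVS 8

struct demo_device {
        struct cdev cdev;
};

static struct class *demo_class;
static dev_t demo_devt;
static struct demo_device demo_dev;

static int demo_open(struct inode *inode, struct file *file)
{
        /* Recover the owning structure from the embedded cdev,
         * the same trick the new mei_open() uses. */
        struct demo_device *dev = container_of(inode->i_cdev,
                                               struct demo_device, cdev);
        file->private_data = dev;
        return 0;
}

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = demo_open,
};

static int __init demo_init(void)
{
        struct device *clsdev;
        dev_t devno;
        int ret;

        demo_class = class_create(THIS_MODULE, "demo");
        if (IS_ERR(demo_class))
                return PTR_ERR(demo_class);

        ret = alloc_chrdev_region(&demo_devt, 0, DEMO_MAX_DEVS, "demo");
        if (ret < 0)
                goto err_class;

        devno = MKDEV(MAJOR(demo_devt), 0);
        cdev_init(&demo_dev.cdev, &demo_fops);
        demo_dev.cdev.owner = THIS_MODULE;
        ret = cdev_add(&demo_dev.cdev, devno, 1);
        if (ret)
                goto err_region;

        /* udev creates /dev/demo0 from this class device. */
        clsdev = device_create(demo_class, NULL, devno, NULL, "demo0");
        if (IS_ERR(clsdev)) {
                ret = PTR_ERR(clsdev);
                goto err_cdev;
        }
        return 0;

err_cdev:
        cdev_del(&demo_dev.cdev);
err_region:
        unregister_chrdev_region(demo_devt, DEMO_MAX_DEVS);
err_class:
        class_destroy(demo_class);
        return ret;
}

static void __exit demo_exit(void)
{
        device_destroy(demo_class, MKDEV(MAJOR(demo_devt), 0));
        cdev_del(&demo_dev.cdev);
        unregister_chrdev_region(demo_devt, DEMO_MAX_DEVS);
        class_destroy(demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");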
 
index 5c7e990..0b0d613 100644 (file)
@@ -227,7 +227,6 @@ struct mei_cl {
 
 /** struct mei_hw_ops
  *
- * @fw_status        - read FW status from PCI config space
  * @host_is_ready    - query for host readiness
 
  * @hw_is_ready      - query if hw is ready
@@ -255,8 +254,6 @@ struct mei_cl {
  */
 struct mei_hw_ops {
 
-       int (*fw_status)(struct mei_device *dev,
-               struct mei_fw_status *fw_status);
        bool (*host_is_ready)(struct mei_device *dev);
 
        bool (*hw_is_ready)(struct mei_device *dev);
@@ -400,6 +397,10 @@ struct mei_cfg {
 /**
  * struct mei_device -  MEI private device struct
 
+ * @pdev - pointer to pci device struct
+ * @cdev - character device
+ * @minor - minor number allocated for device
+ *
  * @reset_count - limits the number of consecutive resets
  * @hbm_state - state of host bus message protocol
  * @pg_event - power gating event
@@ -412,6 +413,9 @@ struct mei_cfg {
  */
 struct mei_device {
        struct pci_dev *pdev;   /* pointer to pci device struct */
+       struct cdev cdev;
+       int minor;
+
        /*
         * lists of queues
         */
@@ -741,7 +745,7 @@ static inline int mei_dbgfs_register(struct mei_device *dev, const char *name)
 static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
 #endif /* CONFIG_DEBUG_FS */
 
-int mei_register(struct mei_device *dev);
+int mei_register(struct mei_device *dev, struct device *parent);
 void mei_deregister(struct mei_device *dev);
 
 #define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
index 1b46c64..a0e9422 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/compat.h>
 #include <linux/jiffies.h>
 #include <linux/interrupt.h>
-#include <linux/miscdevice.h>
 
 #include <linux/pm_runtime.h>
 
@@ -82,6 +81,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch_cfg)},
 
        /* required last entry */
        {0, }
@@ -207,7 +207,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
 
-       err = mei_register(dev);
+       err = mei_register(dev, &pdev->dev);
        if (err)
                goto release_irq;
 
@@ -369,7 +369,7 @@ static int mei_me_pm_runtime_idle(struct device *device)
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
-               pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);
+               pm_runtime_autosuspend(device);
 
        return -EBUSY;
 }
index 2343c62..19de573 100644 (file)
@@ -149,7 +149,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
 
-       err = mei_register(dev);
+       err = mei_register(dev, &pdev->dev);
        if (err)
                goto release_irq;
 
@@ -306,7 +306,7 @@ static int mei_txe_pm_runtime_idle(struct device *device)
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
-               pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2);
+               pm_runtime_autosuspend(device);
 
        return -EBUSY;
 }
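
Both MEI PCI front ends (ME and TXE) above stop calling pm_schedule_suspend()
with a doubled timeout from their runtime-idle callbacks and rely on
pm_runtime_autosuspend(), which honors the autosuspend delay configured at
probe time. A hedged sketch of that idle-callback pattern follows; demo_busy
and demo_runtime_idle() are hypothetical stand-ins for mei_write_is_idle().

#include <linux/pm_runtime.h>
#include <linux/atomic.h>

static atomic_t demo_busy;      /* hypothetical "writes pending" flag */

static int demo_runtime_idle(struct device *dev)
{
        if (!atomic_read(&demo_busy))
                pm_runtime_autosuspend(dev);

        /*
         * pm_runtime_autosuspend() already queued the suspend honoring the
         * autosuspend delay; returning nonzero tells the PM core not to
         * suspend the device again on its own from this callback.
         */
        return -EBUSY;
}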
index 462a5b1..cc4eef0 100644 (file)
@@ -1,8 +1,25 @@
+comment "Intel MIC Bus Driver"
+
+config INTEL_MIC_BUS
+       tristate "Intel MIC Bus Driver"
+       depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+       help
+         This option is selected by any driver which registers a
+         device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
+         CONFIG_INTEL_MIC_CARD, CONFIG_INTEL_MIC_X100_DMA etc.
+
+         If you are building a host/card kernel with an Intel MIC device
+         then say M (recommended) or Y, else say N. If unsure say N.
+
+         More information about the Intel MIC family as well as the Linux
+         OS and tools for MIC to use with this driver are available from
+         <http://software.intel.com/en-us/mic-developer>.
+
 comment "Intel MIC Host Driver"
 
 config INTEL_MIC_HOST
        tristate "Intel MIC Host Driver"
-       depends on 64BIT && PCI && X86
+       depends on 64BIT && PCI && X86 && INTEL_MIC_BUS
        select VHOST_RING
        help
          This enables Host Driver support for the Intel Many Integrated
@@ -22,7 +39,7 @@ comment "Intel MIC Card Driver"
 
 config INTEL_MIC_CARD
        tristate "Intel MIC Card Driver"
-       depends on 64BIT && X86
+       depends on 64BIT && X86 && INTEL_MIC_BUS
        select VIRTIO
        help
          This enables card driver support for the Intel Many Integrated
index 05b34d6..e9bf148 100644 (file)
@@ -4,3 +4,4 @@
 #
 obj-$(CONFIG_INTEL_MIC_HOST) += host/
 obj-$(CONFIG_INTEL_MIC_CARD) += card/
+obj-$(CONFIG_INTEL_MIC_BUS) += bus/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
new file mode 100644 (file)
index 0000000..d85c7f2
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2014, Intel Corporation.
+#
+obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c
new file mode 100644 (file)
index 0000000..961ae90
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Bus driver.
+ *
+ * This implementation is very similar to the virtio bus driver
+ * implementation @ drivers/virtio/virtio.c
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/mic_bus.h>
+
+/* Unique numbering for mbus devices. */
+static DEFINE_IDA(mbus_index_ida);
+
+static ssize_t device_show(struct device *d,
+                          struct device_attribute *attr, char *buf)
+{
+       struct mbus_device *dev = dev_to_mbus(d);
+       return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+                          struct device_attribute *attr, char *buf)
+{
+       struct mbus_device *dev = dev_to_mbus(d);
+       return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct mbus_device *dev = dev_to_mbus(d);
+       return sprintf(buf, "mbus:d%08Xv%08X\n",
+                      dev->id.device, dev->id.vendor);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *mbus_dev_attrs[] = {
+       &dev_attr_device.attr,
+       &dev_attr_vendor.attr,
+       &dev_attr_modalias.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(mbus_dev);
+
+static inline int mbus_id_match(const struct mbus_device *dev,
+                               const struct mbus_device_id *id)
+{
+       if (id->device != dev->id.device && id->device != MBUS_DEV_ANY_ID)
+               return 0;
+
+       return id->vendor == MBUS_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support.  If any of them
+ * match, we return 1 and the kernel will call mbus_dev_probe().
+ */
+static int mbus_dev_match(struct device *dv, struct device_driver *dr)
+{
+       unsigned int i;
+       struct mbus_device *dev = dev_to_mbus(dv);
+       const struct mbus_device_id *ids;
+
+       ids = drv_to_mbus(dr)->id_table;
+       for (i = 0; ids[i].device; i++)
+               if (mbus_id_match(dev, &ids[i]))
+                       return 1;
+       return 0;
+}
+
+static int mbus_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+       struct mbus_device *dev = dev_to_mbus(dv);
+
+       return add_uevent_var(env, "MODALIAS=mbus:d%08Xv%08X",
+                             dev->id.device, dev->id.vendor);
+}
+
+static int mbus_dev_probe(struct device *d)
+{
+       int err;
+       struct mbus_device *dev = dev_to_mbus(d);
+       struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
+
+       err = drv->probe(dev);
+       if (!err)
+               if (drv->scan)
+                       drv->scan(dev);
+       return err;
+}
+
+static int mbus_dev_remove(struct device *d)
+{
+       struct mbus_device *dev = dev_to_mbus(d);
+       struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
+
+       drv->remove(dev);
+       return 0;
+}
+
+static struct bus_type mic_bus = {
+       .name  = "mic_bus",
+       .match = mbus_dev_match,
+       .dev_groups = mbus_dev_groups,
+       .uevent = mbus_uevent,
+       .probe = mbus_dev_probe,
+       .remove = mbus_dev_remove,
+};
+
+int mbus_register_driver(struct mbus_driver *driver)
+{
+       driver->driver.bus = &mic_bus;
+       return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(mbus_register_driver);
+
+void mbus_unregister_driver(struct mbus_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(mbus_unregister_driver);
+
+static void mbus_release_dev(struct device *d)
+{
+       struct mbus_device *mbdev = dev_to_mbus(d);
+       kfree(mbdev);
+}
+
+struct mbus_device *
+mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+                    struct mbus_hw_ops *hw_ops, void __iomem *mmio_va)
+{
+       int ret;
+       struct mbus_device *mbdev;
+
+       mbdev = kzalloc(sizeof(*mbdev), GFP_KERNEL);
+       if (!mbdev)
+               return ERR_PTR(-ENOMEM);
+
+       mbdev->mmio_va = mmio_va;
+       mbdev->dev.parent = pdev;
+       mbdev->id.device = id;
+       mbdev->id.vendor = MBUS_DEV_ANY_ID;
+       mbdev->dev.archdata.dma_ops = dma_ops;
+       mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
+       dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
+       mbdev->dev.release = mbus_release_dev;
+       mbdev->hw_ops = hw_ops;
+       mbdev->dev.bus = &mic_bus;
+
+       /* Assign a unique device index and hence name. */
+       ret = ida_simple_get(&mbus_index_ida, 0, 0, GFP_KERNEL);
+       if (ret < 0)
+               goto free_mbdev;
+
+       mbdev->index = ret;
+       dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
+       /*
+        * device_register() causes the bus infrastructure to look for a
+        * matching driver.
+        */
+       ret = device_register(&mbdev->dev);
+       if (ret)
+               goto ida_remove;
+       return mbdev;
+ida_remove:
+       ida_simple_remove(&mbus_index_ida, mbdev->index);
+free_mbdev:
+       kfree(mbdev);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(mbus_register_device);
+
+void mbus_unregister_device(struct mbus_device *mbdev)
+{
+       int index = mbdev->index; /* save for after device release */
+
+       device_unregister(&mbdev->dev);
+       ida_simple_remove(&mbus_index_ida, index);
+}
+EXPORT_SYMBOL_GPL(mbus_unregister_device);
+
+static int __init mbus_init(void)
+{
+       return bus_register(&mic_bus);
+}
+
+static void __exit mbus_exit(void)
+{
+       bus_unregister(&mic_bus);
+       ida_destroy(&mbus_index_ida);
+}
+
+core_initcall(mbus_init);
+module_exit(mbus_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) MIC Bus driver");
+MODULE_LICENSE("GPL v2");
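
A hedged usage sketch (not part of the patch) showing how a client driver
might bind to the bus registered above. The callback prototypes and the
struct mbus_driver layout live in include/linux/mic_bus.h, which is not shown
here, so the signatures below are assumptions inferred from mbus_dev_probe()
and mbus_dev_remove(); MBUS_DEV_DMA_MIC is the id used by the card-side probe
further down, and all demo_* names are hypothetical.

#include <linux/module.h>
#include <linux/mic_bus.h>

static int demo_mbus_probe(struct mbus_device *mbdev)
{
        dev_info(&mbdev->dev, "bound, device id 0x%04x\n", mbdev->id.device);
        return 0;
}

static void demo_mbus_remove(struct mbus_device *mbdev)
{
        dev_info(&mbdev->dev, "unbound\n");
}

static const struct mbus_device_id demo_id_table[] = {
        { .device = MBUS_DEV_DMA_MIC, .vendor = MBUS_DEV_ANY_ID },
        { 0 },
};

static struct mbus_driver demo_mbus_driver = {
        .driver.name  = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table     = demo_id_table,
        .probe        = demo_mbus_probe,
        .remove       = demo_mbus_remove,
};

static int __init demo_init(void)
{
        return mbus_register_driver(&demo_mbus_driver);
}

static void __exit demo_exit(void)
{
        mbus_unregister_driver(&demo_mbus_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");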
index d0980ff..83819ee 100644 (file)
@@ -83,8 +83,8 @@ static int mic_shutdown_init(void)
        int shutdown_db;
 
        shutdown_db = mic_next_card_db();
-       shutdown_cookie = mic_request_card_irq(mic_shutdown_isr,
-                       "Shutdown", mdrv, shutdown_db);
+       shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, NULL,
+                                              "Shutdown", mdrv, shutdown_db);
        if (IS_ERR(shutdown_cookie))
                rc = PTR_ERR(shutdown_cookie);
        else
@@ -136,7 +136,8 @@ static void mic_dp_uninit(void)
 /**
  * mic_request_card_irq - request an irq.
  *
- * @func: The callback function that handles the interrupt.
+ * @handler: interrupt handler passed to request_threaded_irq.
+ * @thread_fn: thread function passed to request_threaded_irq.
  * @name: The ASCII name of the callee requesting the irq.
  * @data: private data that is returned back when calling the
  * function handler.
@@ -149,17 +150,19 @@ static void mic_dp_uninit(void)
  * error code.
  *
  */
-struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data),
-       const char *name, void *data, int index)
+struct mic_irq *
+mic_request_card_irq(irq_handler_t handler,
+                    irq_handler_t thread_fn, const char *name,
+                    void *data, int index)
 {
        int rc = 0;
        unsigned long cookie;
        struct mic_driver *mdrv = g_drv;
 
-       rc  = request_irq(mic_db_to_irq(mdrv, index), func,
-               0, name, data);
+       rc  = request_threaded_irq(mic_db_to_irq(mdrv, index), handler,
+                                  thread_fn, 0, name, data);
        if (rc) {
-               dev_err(mdrv->dev, "request_irq failed rc = %d\n", rc);
+               dev_err(mdrv->dev, "request_threaded_irq failed rc = %d\n", rc);
                goto err;
        }
        mdrv->irq_info.irq_usage_count[index]++;
@@ -172,9 +175,9 @@ err:
 /**
  * mic_free_card_irq - free irq.
  *
- * @cookie: cookie obtained during a successful call to mic_request_irq
+ * @cookie: cookie obtained during a successful call to mic_request_card_irq
  * @data: private data specified by the calling function during the
- * mic_request_irq
+ * mic_request_card_irq
  *
  * returns: none.
  */
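
For reference, a hedged sketch of how a card-side client would use the
threaded variant introduced above; demo_hard_handler(), demo_thread_fn() and
demo_setup() are hypothetical, while mic_next_card_db() and
mic_free_card_irq() are the helpers from this file.

#include <linux/interrupt.h>

static irqreturn_t demo_hard_handler(int irq, void *data)
{
        /* Only quick, non-sleeping work here; defer the rest. */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
        /* Runs in process context, so sleeping is allowed. */
        return IRQ_HANDLED;
}

static int demo_setup(void *data)
{
        struct mic_irq *cookie;
        int db = mic_next_card_db();

        cookie = mic_request_card_irq(demo_hard_handler, demo_thread_fn,
                                      "demo", data, db);
        if (IS_ERR(cookie))
                return PTR_ERR(cookie);

        /* ... and on teardown: mic_free_card_irq(cookie, data); */
        return 0;
}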
index 306f502..844be8f 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/mic_bus.h>
 
 /**
  * struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -70,6 +72,7 @@ struct mic_device {
  * @hotplug_work: Hot plug work for adding/removing virtio devices.
  * @irq_info: The OS specific irq information
  * @intr_info: H/W specific interrupt information.
+ * @dma_mbdev: dma device on the MIC virtual bus.
  */
 struct mic_driver {
        char name[20];
@@ -80,6 +83,7 @@ struct mic_driver {
        struct work_struct hotplug_work;
        struct mic_irq_info irq_info;
        struct mic_intr_info intr_info;
+       struct mbus_device *dma_mbdev;
 };
 
 /**
@@ -116,8 +120,9 @@ mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
 int mic_driver_init(struct mic_driver *mdrv);
 void mic_driver_uninit(struct mic_driver *mdrv);
 int mic_next_card_db(void);
-struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data),
-       const char *name, void *data, int intr_src);
+struct mic_irq *
+mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
+                    const char *name, void *data, int intr_src);
 void mic_free_card_irq(struct mic_irq *cookie, void *data);
 u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
 void mic_send_intr(struct mic_device *mdev, int doorbell);
index 653799b..f14b600 100644 (file)
@@ -417,7 +417,7 @@ static int mic_add_device(struct mic_device_desc __iomem *d,
 
        virtio_db = mic_next_card_db();
        mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
-                       "virtio intr", mvdev, virtio_db);
+                       NULL, "virtio intr", mvdev, virtio_db);
        if (IS_ERR(mvdev->virtio_cookie)) {
                ret = PTR_ERR(mvdev->virtio_cookie);
                goto kfree;
@@ -606,8 +606,9 @@ int mic_devices_init(struct mic_driver *mdrv)
        mic_scan_devices(mdrv, !REMOVE_DEVICES);
 
        config_db = mic_next_card_db();
-       virtio_config_cookie = mic_request_card_irq(mic_extint_handler,
-                       "virtio_config_intr", mdrv, config_db);
+       virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
+                                                   "virtio_config_intr", mdrv,
+                                                   config_db);
        if (IS_ERR(virtio_config_cookie)) {
                rc = PTR_ERR(virtio_config_cookie);
                goto exit;
index 2868945..9d57545 100644 (file)
@@ -148,6 +148,47 @@ void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
        iounmap(addr);
 }
 
+static inline struct mic_driver *mbdev_to_mdrv(struct mbus_device *mbdev)
+{
+       return dev_get_drvdata(mbdev->dev.parent);
+}
+
+static struct mic_irq *
+_mic_request_threaded_irq(struct mbus_device *mbdev,
+                         irq_handler_t handler, irq_handler_t thread_fn,
+                         const char *name, void *data, int intr_src)
+{
+       int rc = 0;
+       unsigned int irq = intr_src;
+       unsigned long cookie = irq;
+
+       rc  = request_threaded_irq(irq, handler, thread_fn, 0, name, data);
+       if (rc) {
+               dev_err(mbdev_to_mdrv(mbdev)->dev,
+                       "request_threaded_irq failed rc = %d\n", rc);
+               return ERR_PTR(rc);
+       }
+       return (struct mic_irq *)cookie;
+}
+
+static void _mic_free_irq(struct mbus_device *mbdev,
+                         struct mic_irq *cookie, void *data)
+{
+       unsigned long irq = (unsigned long)cookie;
+       free_irq(irq, data);
+}
+
+static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
+{
+       mic_ack_interrupt(&mbdev_to_mdrv(mbdev)->mdev);
+}
+
+static struct mbus_hw_ops mbus_hw_ops = {
+       .request_threaded_irq = _mic_request_threaded_irq,
+       .free_irq = _mic_free_irq,
+       .ack_interrupt = _mic_ack_interrupt,
+};
+
 static int __init mic_probe(struct platform_device *pdev)
 {
        struct mic_driver *mdrv = &g_drv;
@@ -159,32 +200,41 @@ static int __init mic_probe(struct platform_device *pdev)
 
        mdev->mmio.pa = MIC_X100_MMIO_BASE;
        mdev->mmio.len = MIC_X100_MMIO_LEN;
-       mdev->mmio.va = ioremap(MIC_X100_MMIO_BASE, MIC_X100_MMIO_LEN);
+       mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
+                                    MIC_X100_MMIO_LEN);
        if (!mdev->mmio.va) {
                dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
                rc = -EIO;
                goto done;
        }
        mic_hw_intr_init(mdrv);
+       platform_set_drvdata(pdev, mdrv);
+       mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC,
+                                              NULL, &mbus_hw_ops,
+                                              mdrv->mdev.mmio.va);
+       if (IS_ERR(mdrv->dma_mbdev)) {
+               rc = PTR_ERR(mdrv->dma_mbdev);
+               dev_err(&pdev->dev, "mbus_register_device failed rc %d\n", rc);
+               goto done;
+       }
        rc = mic_driver_init(mdrv);
        if (rc) {
                dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
-               goto iounmap;
+               goto remove_dma;
        }
 done:
        return rc;
-iounmap:
-       iounmap(mdev->mmio.va);
+remove_dma:
+       mbus_unregister_device(mdrv->dma_mbdev);
        return rc;
 }
 
 static int mic_remove(struct platform_device *pdev)
 {
        struct mic_driver *mdrv = &g_drv;
-       struct mic_device *mdev = &mdrv->mdev;
 
        mic_driver_uninit(mdrv);
-       iounmap(mdev->mmio.va);
+       mbus_unregister_device(mdrv->dma_mbdev);
        return 0;
 }
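
The probe hunk above also moves from ioremap()/iounmap() to devm_ioremap(),
so mic_remove() no longer unmaps the MMIO region by hand. Below is a minimal
hedged sketch of that managed-MMIO pattern in a platform driver;
DEMO_MMIO_BASE, DEMO_MMIO_LEN and demo_probe() are hypothetical.

#include <linux/platform_device.h>
#include <linux/io.h>

#define DEMO_MMIO_BASE  0x80000000UL    /* hypothetical physical base */
#define DEMO_MMIO_LEN   0x1000

static int demo_probe(struct platform_device *pdev)
{
        void __iomem *va;

        va = devm_ioremap(&pdev->dev, DEMO_MMIO_BASE, DEMO_MMIO_LEN);
        if (!va)
                return -EIO;

        /* e.g. read a hypothetical register at offset 0 */
        dev_info(&pdev->dev, "reg0 = 0x%08x\n", readl(va));

        /* No iounmap() in remove(): the mapping is released when the
         * device goes away, which is what mic_remove() now relies on. */
        return 0;
}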
 
index b75c6b5..ff2b0fb 100644 (file)
 #include <linux/pci.h>
 
 #include <linux/mic_common.h>
+#include <linux/mic_bus.h>
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
 #include "mic_virtio.h"
 
+static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
+{
+       return dev_get_drvdata(mbdev->dev.parent);
+}
+
+static dma_addr_t
+mic_dma_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+       void *va = phys_to_virt(page_to_phys(page)) + offset;
+       struct mic_device *mdev = dev_get_drvdata(dev->parent);
+
+       return mic_map_single(mdev, va, size);
+}
+
+static void
+mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+                  size_t size, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
+{
+       struct mic_device *mdev = dev_get_drvdata(dev->parent);
+       mic_unmap_single(mdev, dma_addr, size);
+}
+
+static struct dma_map_ops mic_dma_ops = {
+       .map_page = mic_dma_map_page,
+       .unmap_page = mic_dma_unmap_page,
+};
+
+static struct mic_irq *
+_mic_request_threaded_irq(struct mbus_device *mbdev,
+                         irq_handler_t handler, irq_handler_t thread_fn,
+                         const char *name, void *data, int intr_src)
+{
+       return mic_request_threaded_irq(mbdev_to_mdev(mbdev), handler,
+                                       thread_fn, name, data,
+                                       intr_src, MIC_INTR_DMA);
+}
+
+static void _mic_free_irq(struct mbus_device *mbdev,
+                         struct mic_irq *cookie, void *data)
+{
+       return mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
+}
+
+static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
+{
+       struct mic_device *mdev = mbdev_to_mdev(mbdev);
+       mdev->ops->intr_workarounds(mdev);
+}
+
+static struct mbus_hw_ops mbus_hw_ops = {
+       .request_threaded_irq = _mic_request_threaded_irq,
+       .free_irq = _mic_free_irq,
+       .ack_interrupt = _mic_ack_interrupt,
+};
+
 /**
  * mic_reset - Reset the MIC device.
  * @mdev: pointer to mic_device instance
@@ -95,9 +154,21 @@ retry:
                 */
                goto retry;
        }
+       mdev->dma_mbdev = mbus_register_device(mdev->sdev->parent,
+                                              MBUS_DEV_DMA_HOST, &mic_dma_ops,
+                                              &mbus_hw_ops, mdev->mmio.va);
+       if (IS_ERR(mdev->dma_mbdev)) {
+               rc = PTR_ERR(mdev->dma_mbdev);
+               goto unlock_ret;
+       }
+       mdev->dma_ch = mic_request_dma_chan(mdev);
+       if (!mdev->dma_ch) {
+               rc = -ENXIO;
+               goto dma_remove;
+       }
        rc = mdev->ops->load_mic_fw(mdev, buf);
        if (rc)
-               goto unlock_ret;
+               goto dma_release;
        mic_smpt_restore(mdev);
        mic_intr_restore(mdev);
        mdev->intr_ops->enable_interrupts(mdev);
@@ -105,6 +176,11 @@ retry:
        mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
        mdev->ops->send_firmware_intr(mdev);
        mic_set_state(mdev, MIC_ONLINE);
+       goto unlock_ret;
+dma_release:
+       dma_release_channel(mdev->dma_ch);
+dma_remove:
+       mbus_unregister_device(mdev->dma_mbdev);
 unlock_ret:
        mutex_unlock(&mdev->mic_mutex);
        return rc;
@@ -122,6 +198,11 @@ void mic_stop(struct mic_device *mdev, bool force)
        mutex_lock(&mdev->mic_mutex);
        if (MIC_OFFLINE != mdev->state || force) {
                mic_virtio_reset_devices(mdev);
+               if (mdev->dma_ch) {
+                       dma_release_channel(mdev->dma_ch);
+                       mdev->dma_ch = NULL;
+               }
+               mbus_unregister_device(mdev->dma_mbdev);
                mic_bootparam_init(mdev);
                mic_reset(mdev);
                if (MIC_RESET_FAILED == mdev->state)
index 0398c69..016bd15 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/mic_bus.h>
 
 #include "mic_intr.h"
 
@@ -87,6 +89,8 @@ enum mic_stepping {
  * @cdev: Character device for MIC.
  * @vdev_list: list of virtio devices.
  * @pm_notifier: Handles PM notifications from the OS.
+ * @dma_mbdev: MIC BUS DMA device.
+ * @dma_ch: DMA channel reserved by this driver for use by virtio devices.
  */
 struct mic_device {
        struct mic_mw mmio;
@@ -124,6 +128,8 @@ struct mic_device {
        struct cdev cdev;
        struct list_head vdev_list;
        struct notifier_block pm_notifier;
+       struct mbus_device *dma_mbdev;
+       struct dma_chan *dma_ch;
 };
 
 /**
@@ -144,6 +150,7 @@ struct mic_device {
  * @load_mic_fw: Load firmware segments required to boot the card
  * into card memory. This includes the kernel, command line, ramdisk etc.
  * @get_postcode: Get post code status from firmware.
+ * @dma_filter: DMA filter function to be used.
  */
 struct mic_hw_ops {
        u8 aper_bar;
@@ -159,6 +166,7 @@ struct mic_hw_ops {
        void (*send_firmware_intr)(struct mic_device *mdev);
        int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
        u32 (*get_postcode)(struct mic_device *mdev);
+       bool (*dma_filter)(struct dma_chan *chan, void *param);
 };
 
 /**
@@ -187,6 +195,22 @@ mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
        iowrite32(val, mw->va + offset);
 }
 
+static inline struct dma_chan *mic_request_dma_chan(struct mic_device *mdev)
+{
+       dma_cap_mask_t mask;
+       struct dma_chan *chan;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       chan = dma_request_channel(mask, mdev->ops->dma_filter,
+                                  mdev->sdev->parent);
+       if (chan)
+               return chan;
+       dev_err(mdev->sdev->parent, "%s %d unable to acquire channel\n",
+               __func__, __LINE__);
+       return NULL;
+}
+
 void mic_sysfs_init(struct mic_device *mdev);
 int mic_start(struct mic_device *mdev, const char *buf);
 void mic_stop(struct mic_device *mdev, bool force);
index dbc5afd..d686f28 100644 (file)
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 
-/*
- * mic_invoke_callback - Invoke callback functions registered for
- * the corresponding source id.
- *
- * @mdev: pointer to the mic_device instance
- * @idx: The interrupt source id.
- *
- * Returns none.
- */
-static inline void mic_invoke_callback(struct mic_device *mdev, int idx)
+static irqreturn_t mic_thread_fn(int irq, void *dev)
 {
+       struct mic_device *mdev = dev;
+       struct mic_intr_info *intr_info = mdev->intr_info;
+       struct mic_irq_info *irq_info = &mdev->irq_info;
        struct mic_intr_cb *intr_cb;
        struct pci_dev *pdev = container_of(mdev->sdev->parent,
-               struct pci_dev, dev);
+                                           struct pci_dev, dev);
+       int i;
 
-       spin_lock(&mdev->irq_info.mic_intr_lock);
-       list_for_each_entry(intr_cb, &mdev->irq_info.cb_list[idx], list)
-               if (intr_cb->func)
-                       intr_cb->func(pdev->irq, intr_cb->data);
-       spin_unlock(&mdev->irq_info.mic_intr_lock);
+       spin_lock(&irq_info->mic_thread_lock);
+       for (i = intr_info->intr_start_idx[MIC_INTR_DB];
+                       i < intr_info->intr_len[MIC_INTR_DB]; i++)
+               if (test_and_clear_bit(i, &irq_info->mask)) {
+                       list_for_each_entry(intr_cb, &irq_info->cb_list[i],
+                                           list)
+                               if (intr_cb->thread_fn)
+                                       intr_cb->thread_fn(pdev->irq,
+                                                        intr_cb->data);
+               }
+       spin_unlock(&irq_info->mic_thread_lock);
+       return IRQ_HANDLED;
 }
-
 /**
  * mic_interrupt - Generic interrupt handler for
  * MSI and INTx based interrupts.
@@ -53,7 +54,11 @@ static inline void mic_invoke_callback(struct mic_device *mdev, int idx)
 static irqreturn_t mic_interrupt(int irq, void *dev)
 {
        struct mic_device *mdev = dev;
-       struct mic_intr_info *info = mdev->intr_info;
+       struct mic_intr_info *intr_info = mdev->intr_info;
+       struct mic_irq_info *irq_info = &mdev->irq_info;
+       struct mic_intr_cb *intr_cb;
+       struct pci_dev *pdev = container_of(mdev->sdev->parent,
+                                           struct pci_dev, dev);
        u32 mask;
        int i;
 
@@ -61,12 +66,19 @@ static irqreturn_t mic_interrupt(int irq, void *dev)
        if (!mask)
                return IRQ_NONE;
 
-       for (i = info->intr_start_idx[MIC_INTR_DB];
-                       i < info->intr_len[MIC_INTR_DB]; i++)
-               if (mask & BIT(i))
-                       mic_invoke_callback(mdev, i);
-
-       return IRQ_HANDLED;
+       spin_lock(&irq_info->mic_intr_lock);
+       for (i = intr_info->intr_start_idx[MIC_INTR_DB];
+                       i < intr_info->intr_len[MIC_INTR_DB]; i++)
+               if (mask & BIT(i)) {
+                       list_for_each_entry(intr_cb, &irq_info->cb_list[i],
+                                           list)
+                               if (intr_cb->handler)
+                                       intr_cb->handler(pdev->irq,
+                                                        intr_cb->data);
+                       set_bit(i, &irq_info->mask);
+               }
+       spin_unlock(&irq_info->mic_intr_lock);
+       return IRQ_WAKE_THREAD;
 }
 
 /* Return the interrupt offset from the index. Index is 0 based. */
@@ -99,14 +111,15 @@ static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
  *
  * @mdev: pointer to the mic_device instance
  * @idx: The source id to be registered.
- * @func: The function to be called when the source id receives
+ * @handler: The function to be called when the source id receives
  * the interrupt.
+ * @thread_fn: thread function corresponding to the handler
  * @data: Private data of the requester.
  * Return the callback structure that was registered or an
  * appropriate error on failure.
  */
 static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
-                       u8 idx, irqreturn_t (*func) (int irq, void *dev),
+                       u8 idx, irq_handler_t handler, irq_handler_t thread_fn,
                        void *data)
 {
        struct mic_intr_cb *intr_cb;
@@ -117,7 +130,8 @@ static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
        if (!intr_cb)
                return ERR_PTR(-ENOMEM);
 
-       intr_cb->func = func;
+       intr_cb->handler = handler;
+       intr_cb->thread_fn = thread_fn;
        intr_cb->data = data;
        intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
                0, 0, GFP_KERNEL);
@@ -126,9 +140,11 @@ static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
                goto ida_fail;
        }
 
+       spin_lock(&mdev->irq_info.mic_thread_lock);
        spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
        list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
        spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+       spin_unlock(&mdev->irq_info.mic_thread_lock);
 
        return intr_cb;
 ida_fail:
@@ -152,8 +168,9 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
        unsigned long flags;
        int i;
 
+       spin_lock(&mdev->irq_info.mic_thread_lock);
+       spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
        for (i = 0;  i < MIC_NUM_OFFSETS; i++) {
-               spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
                list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
                        intr_cb = list_entry(pos, struct mic_intr_cb, list);
                        if (intr_cb->cb_id == idx) {
@@ -163,11 +180,13 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
                                kfree(intr_cb);
                                spin_unlock_irqrestore(
                                        &mdev->irq_info.mic_intr_lock, flags);
+                               spin_unlock(&mdev->irq_info.mic_thread_lock);
                                return i;
                        }
                }
-               spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
        }
+       spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+       spin_unlock(&mdev->irq_info.mic_thread_lock);
        return MIC_NUM_OFFSETS;
 }
 
@@ -242,6 +261,7 @@ static int mic_setup_callbacks(struct mic_device *mdev)
                INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
        ida_init(&mdev->irq_info.cb_ida);
        spin_lock_init(&mdev->irq_info.mic_intr_lock);
+       spin_lock_init(&mdev->irq_info.mic_thread_lock);
        return 0;
 }
 
@@ -258,14 +278,12 @@ static void mic_release_callbacks(struct mic_device *mdev)
        struct mic_intr_cb *intr_cb;
        int i;
 
+       spin_lock(&mdev->irq_info.mic_thread_lock);
+       spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
        for (i = 0; i < MIC_NUM_OFFSETS; i++) {
-               spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
 
-               if (list_empty(&mdev->irq_info.cb_list[i])) {
-                       spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock,
-                                              flags);
+               if (list_empty(&mdev->irq_info.cb_list[i]))
                        break;
-               }
 
                list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
                        intr_cb = list_entry(pos, struct mic_intr_cb, list);
@@ -274,8 +292,9 @@ static void mic_release_callbacks(struct mic_device *mdev)
                                          intr_cb->cb_id);
                        kfree(intr_cb);
                }
-               spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
        }
+       spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
+       spin_unlock(&mdev->irq_info.mic_thread_lock);
        ida_destroy(&mdev->irq_info.cb_ida);
        kfree(mdev->irq_info.cb_list);
 }
@@ -313,7 +332,8 @@ static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
                goto err_nomem2;
        }
 
-       rc = request_irq(pdev->irq, mic_interrupt, 0 , "mic-msi", mdev);
+       rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
+                                 0, "mic-msi", mdev);
        if (rc) {
                dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
                goto err_irq_req_fail;
@@ -353,8 +373,8 @@ static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
                goto err_nomem;
        }
 
-       rc = request_irq(pdev->irq, mic_interrupt,
-               IRQF_SHARED, "mic-intx", mdev);
+       rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
+                                 IRQF_SHARED, "mic-intx", mdev);
        if (rc)
                goto err;
 
@@ -391,13 +411,14 @@ int mic_next_db(struct mic_device *mdev)
 #define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
 
 /**
- * mic_request_irq - request an irq. mic_mutex needs
+ * mic_request_threaded_irq - request an irq. mic_mutex needs
  * to be held before calling this function.
  *
  * @mdev: pointer to mic_device instance
- * @func: The callback function that handles the interrupt.
+ * @handler: The callback function that handles the interrupt.
  * The function needs to call ack_interrupts
  * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
+ * @thread_fn: thread function required by request_threaded_irq.
  * @name: The ASCII name of the callee requesting the irq.
  * @data: private data that is returned back when calling the
  * function handler.
@@ -412,10 +433,11 @@ int mic_next_db(struct mic_device *mdev)
  * error code.
  *
  */
-struct mic_irq *mic_request_irq(struct mic_device *mdev,
-       irqreturn_t (*func)(int irq, void *dev),
-       const char *name, void *data, int intr_src,
-       enum mic_intr_type type)
+struct mic_irq *
+mic_request_threaded_irq(struct mic_device *mdev,
+                        irq_handler_t handler, irq_handler_t thread_fn,
+                        const char *name, void *data, int intr_src,
+                        enum mic_intr_type type)
 {
        u16 offset;
        int rc = 0;
@@ -444,7 +466,8 @@ struct mic_irq *mic_request_irq(struct mic_device *mdev,
                        goto err;
                }
 
-               rc = request_irq(msix->vector, func, 0, name, data);
+               rc = request_threaded_irq(msix->vector, handler, thread_fn,
+                                         0, name, data);
                if (rc) {
                        dev_dbg(mdev->sdev->parent,
                                "request irq failed rc = %d\n", rc);
@@ -458,8 +481,8 @@ struct mic_irq *mic_request_irq(struct mic_device *mdev,
                dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n",
                        msix->vector, intr_src);
        } else {
-               intr_cb = mic_register_intr_callback(mdev,
-                               offset, func, data);
+               intr_cb = mic_register_intr_callback(mdev, offset, handler,
+                                                    thread_fn, data);
                if (IS_ERR(intr_cb)) {
                        dev_err(mdev->sdev->parent,
                                "No available callback entries for use\n");
@@ -487,9 +510,9 @@ err:
  *  needs to be held before calling this function.
  *
  * @mdev: pointer to mic_device instance
- * @cookie: cookie obtained during a successful call to mic_request_irq
+ * @cookie: cookie obtained during a successful call to mic_request_threaded_irq
  * @data: private data specified by the calling function during the
- * mic_request_irq
+ * mic_request_threaded_irq
  *
  * returns: none.
  */
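
The interrupt rework above splits handling into two stages: mic_interrupt()
now only runs the registered hard handlers, records the pending doorbells in
irq_info->mask and returns IRQ_WAKE_THREAD, while mic_thread_fn() later drains
that mask and calls the sleepable thread_fn callbacks under mic_thread_lock.
A hedged, lock-free miniature of that handoff follows (the real code
additionally serializes the callback lists); all demo_* names are
hypothetical.

#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long demo_pending;      /* one bit per doorbell source */

static irqreturn_t demo_hard_handler(int irq, void *data)
{
        /* Pretend source 3 raised this interrupt. */
        set_bit(3, &demo_pending);
        return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
        int i;

        for (i = 0; i < BITS_PER_LONG; i++)
                if (test_and_clear_bit(i, &demo_pending))
                        pr_info("doorbell %d handled in process context\n", i);
        return IRQ_HANDLED;
}

/*
 * Registered with:
 *      request_threaded_irq(irq, demo_hard_handler, demo_thread_fn,
 *                           0, "demo", demo_data);
 */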
index 6091aa9..9f783d4 100644 (file)
 #ifndef _MIC_INTR_H_
 #define _MIC_INTR_H_
 
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
 /*
  * The minimum number of msix vectors required for normal operation.
  * 3 for virtio network, console and block devices.
  * 1 for card shutdown notifications.
+ * 4 for host owned DMA channels.
  */
-#define MIC_MIN_MSIX 4
+#define MIC_MIN_MSIX 8
 #define MIC_NUM_OFFSETS 32
 
 /**
@@ -68,7 +71,11 @@ struct mic_intr_info {
  * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
  * @cb_ida: callback ID allocator to track the callbacks registered.
  * @mic_intr_lock: spinlock to protect the interrupt callback list.
+ * @mic_thread_lock: spinlock to protect the thread callback list.
+ *                This lock serializes the threaded handlers (thread_fn)
+ *                while mic_intr_lock serializes the hard interrupt handlers.
+ * @cb_list: Array of callback lists, one for each source.
+ * @mask: Bitmask of pending sources, set by the interrupt handler and used
+ *        by the main thread fn to invoke the registered thread_fn callbacks.
  */
 struct mic_irq_info {
        int next_avail_src;
@@ -77,19 +84,23 @@ struct mic_irq_info {
        u16 num_vectors;
        struct ida cb_ida;
        spinlock_t mic_intr_lock;
+       spinlock_t mic_thread_lock;
        struct list_head *cb_list;
+       unsigned long mask;
 };
 
 /**
  * struct mic_intr_cb - Interrupt callback structure.
  *
- * @func: The callback function
+ * @handler: The callback function
+ * @thread_fn: The threaded handler function.
  * @data: Private data of the requester.
  * @cb_id: The callback id. Identifies this callback.
  * @list: list head pointing to the next callback structure.
  */
 struct mic_intr_cb {
-       irqreturn_t (*func) (int irq, void *data);
+       irq_handler_t handler;
+       irq_handler_t thread_fn;
        void *data;
        int cb_id;
        struct list_head list;
@@ -124,11 +135,11 @@ struct mic_hw_intr_ops {
 };
 
 int mic_next_db(struct mic_device *mdev);
-struct mic_irq *mic_request_irq(struct mic_device *mdev,
-       irqreturn_t (*func)(int irq, void *data),
-       const char *name, void *data, int intr_src,
-       enum mic_intr_type type);
-
+struct mic_irq *
+mic_request_threaded_irq(struct mic_device *mdev,
+                        irq_handler_t handler, irq_handler_t thread_fn,
+                        const char *name, void *data, int intr_src,
+                        enum mic_intr_type type);
 void mic_free_irq(struct mic_device *mdev,
                struct mic_irq *cookie, void *data);
 int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
index c04a021..ab37a31 100644 (file)
@@ -38,7 +38,7 @@
 
 static const char mic_driver_name[] = "mic";
 
-static DEFINE_PCI_DEVICE_TABLE(mic_pci_tbl) = {
+static const struct pci_device_id mic_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)},
@@ -389,8 +389,9 @@ static int mic_probe(struct pci_dev *pdev,
        mutex_lock(&mdev->mic_mutex);
 
        mdev->shutdown_db = mic_next_db(mdev);
-       mdev->shutdown_cookie = mic_request_irq(mdev, mic_shutdown_db,
-               "shutdown-interrupt", mdev, mdev->shutdown_db, MIC_INTR_DB);
+       mdev->shutdown_cookie = mic_request_threaded_irq(mdev, mic_shutdown_db,
+                                       NULL, "shutdown-interrupt", mdev,
+                                       mdev->shutdown_db, MIC_INTR_DB);
        if (IS_ERR(mdev->shutdown_cookie)) {
                rc = PTR_ERR(mdev->shutdown_cookie);
                mutex_unlock(&mdev->mic_mutex);
index 7e1ef0e..a020e4e 100644 (file)
 #include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
-
+#include <linux/dmaengine.h>
 #include <linux/mic_common.h>
+
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
 #include "mic_virtio.h"
 
 /*
- * Initiates the copies across the PCIe bus from card memory to
- * a user space buffer.
+ * Size of the internal buffer used during DMAs as an intermediate buffer
+ * for copy to/from user.
  */
-static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
-               void __user *ubuf, size_t len, u64 addr)
+#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
+
+static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
+                       dma_addr_t src, size_t len)
 {
-       int err;
-       void __iomem *dbuf = mvdev->mdev->aper.va + addr;
-       /*
-        * We are copying from IO below an should ideally use something
-        * like copy_to_user_fromio(..) if it existed.
-        */
-       if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
-               err = -EFAULT;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
+       int err = 0;
+       struct dma_async_tx_descriptor *tx;
+       struct dma_chan *mic_ch = mdev->dma_ch;
+
+       if (!mic_ch) {
+               err = -EBUSY;
+               goto error;
+       }
+
+       tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
+                                                   DMA_PREP_FENCE);
+       if (!tx) {
+               err = -ENOMEM;
+               goto error;
+       } else {
+               dma_cookie_t cookie = tx->tx_submit(tx);
+
+               err = dma_submit_error(cookie);
+               if (err)
+                       goto error;
+               err = dma_sync_wait(mic_ch, cookie);
+       }
+error:
+       if (err)
+               dev_err(mdev->sdev->parent, "%s %d err %d\n",
                        __func__, __LINE__, err);
-               goto err;
+       return err;
+}
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to a user
+ * space buffer. When transfers are done using DMA, source/destination
+ * addresses and transfer length must follow the alignment requirements of
+ * the MIC DMA engine.
+ */
+static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
+                                  size_t len, u64 daddr, size_t dlen,
+                                  int vr_idx)
+{
+       struct mic_device *mdev = mvdev->mdev;
+       void __iomem *dbuf = mdev->aper.va + daddr;
+       struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
+       size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+       size_t dma_offset;
+       size_t partlen;
+       int err;
+
+       dma_offset = daddr - round_down(daddr, dma_alignment);
+       daddr -= dma_offset;
+       len += dma_offset;
+
+       while (len) {
+               partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
+
+               err = mic_sync_dma(mdev, mvr->buf_da, daddr,
+                                  ALIGN(partlen, dma_alignment));
+               if (err)
+                       goto err;
+
+               if (copy_to_user(ubuf, mvr->buf + dma_offset,
+                                partlen - dma_offset)) {
+                       err = -EFAULT;
+                       goto err;
+               }
+               daddr += partlen;
+               ubuf += partlen;
+               dbuf += partlen;
+               mvdev->in_bytes_dma += partlen;
+               mvdev->in_bytes += partlen;
+               len -= partlen;
+               dma_offset = 0;
        }
-       mvdev->in_bytes += len;
-       err = 0;
+       return 0;
 err:
+       dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
        return err;
 }
 
 /*
- * Initiates copies across the PCIe bus from a user space
- * buffer to card memory.
+ * Initiates copies across the PCIe bus from a user space buffer to card
+ * memory. When transfers are done using DMA, source/destination addresses
+ * and transfer length must follow the alignment requirements of the MIC
+ * DMA engine.
  */
-static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
-               void __user *ubuf, size_t len, u64 addr)
+static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
+                                    size_t len, u64 daddr, size_t dlen,
+                                    int vr_idx)
 {
+       struct mic_device *mdev = mvdev->mdev;
+       void __iomem *dbuf = mdev->aper.va + daddr;
+       struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
+       size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
+       size_t partlen;
        int err;
-       void __iomem *dbuf = mvdev->mdev->aper.va + addr;
+
+       if (daddr & (dma_alignment - 1)) {
+               mvdev->tx_dst_unaligned += len;
+               goto memcpy;
+       } else if (ALIGN(len, dma_alignment) > dlen) {
+               mvdev->tx_len_unaligned += len;
+               goto memcpy;
+       }
+
+       while (len) {
+               partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
+
+               if (copy_from_user(mvr->buf, ubuf, partlen)) {
+                       err = -EFAULT;
+                       goto err;
+               }
+               err = mic_sync_dma(mdev, daddr, mvr->buf_da,
+                                  ALIGN(partlen, dma_alignment));
+               if (err)
+                       goto err;
+               daddr += partlen;
+               ubuf += partlen;
+               dbuf += partlen;
+               mvdev->out_bytes_dma += partlen;
+               mvdev->out_bytes += partlen;
+               len -= partlen;
+       }
+memcpy:
        /*
         * We are copying to IO below and should ideally use something
         * like copy_from_user_toio(..) if it existed.
         */
        if (copy_from_user((void __force *)dbuf, ubuf, len)) {
                err = -EFAULT;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, err);
                goto err;
        }
        mvdev->out_bytes += len;
-       err = 0;
+       return 0;
 err:
+       dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
        return err;
 }
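
A small hedged illustration (not part of the patch) of the alignment fix-up
used by mic_virtio_copy_to_user() above: an unaligned card address is rounded
down to the DMA engine's copy alignment, the user copy later skips the extra
leading bytes, and the DMA length is padded back up with ALIGN(). The copy
alignment of 64 bytes and the concrete addresses are assumptions.

#include <linux/kernel.h>       /* round_down(), ALIGN() */

static void __maybe_unused demo_align(void)
{
        size_t dma_alignment = 1 << 6;  /* assumed copy_align of 6 -> 64 B */
        u64 daddr = 0x1010;             /* hypothetical card offset */
        size_t len = 100;               /* bytes the user asked for */
        size_t dma_offset;

        dma_offset = daddr - round_down(daddr, dma_alignment);  /* 0x10 */
        daddr -= dma_offset;                                     /* 0x1000 */
        len += dma_offset;                                       /* 116 */

        pr_info("DMA %zu bytes from %#llx, hand %zu of them to the user\n",
                ALIGN(len, dma_alignment), (unsigned long long)daddr,
                len - dma_offset);      /* 128 bytes DMA'd, 100 copied out */
}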
 
@@ -110,7 +207,8 @@ static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
  * way to override the VRINGH xfer(..) routines as of v3.10.
  */
 static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
-       void __user *ubuf, size_t len, bool read, size_t *out_len)
+                       void __user *ubuf, size_t len, bool read, int vr_idx,
+                       size_t *out_len)
 {
        int ret = 0;
        size_t partlen, tot_len = 0;
@@ -118,13 +216,15 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
        while (len && iov->i < iov->used) {
                partlen = min(iov->iov[iov->i].iov_len, len);
                if (read)
-                       ret = mic_virtio_copy_to_user(mvdev,
-                               ubuf, partlen,
-                               (u64)iov->iov[iov->i].iov_base);
+                       ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
+                                               (u64)iov->iov[iov->i].iov_base,
+                                               iov->iov[iov->i].iov_len,
+                                               vr_idx);
                else
-                       ret = mic_virtio_copy_from_user(mvdev,
-                               ubuf, partlen,
-                               (u64)iov->iov[iov->i].iov_base);
+                       ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
+                                               (u64)iov->iov[iov->i].iov_base,
+                                               iov->iov[iov->i].iov_len,
+                                               vr_idx);
                if (ret) {
                        dev_err(mic_dev(mvdev), "%s %d err %d\n",
                                __func__, __LINE__, ret);
@@ -192,8 +292,8 @@ static int _mic_virtio_copy(struct mic_vdev *mvdev,
                        ubuf = iov.iov_base;
                }
                /* Issue all the read descriptors first */
-               ret = mic_vringh_copy(mvdev, riov, ubuf, len,
-                       MIC_VRINGH_READ, &out_len);
+               ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
+                                     copy->vr_idx, &out_len);
                if (ret) {
                        dev_err(mic_dev(mvdev), "%s %d err %d\n",
                                __func__, __LINE__, ret);
@@ -203,8 +303,8 @@ static int _mic_virtio_copy(struct mic_vdev *mvdev,
                ubuf += out_len;
                copy->out_len += out_len;
                /* Issue the write descriptors next */
-               ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
-                       !MIC_VRINGH_READ, &out_len);
+               ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
+                                     copy->vr_idx, &out_len);
                if (ret) {
                        dev_err(mic_dev(mvdev), "%s %d err %d\n",
                                __func__, __LINE__, ret);
@@ -589,13 +689,19 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
                dev_dbg(mdev->sdev->parent,
                        "%s %d index %d va %p info %p vr_size 0x%x\n",
                        __func__, __LINE__, i, vr->va, vr->info, vr_size);
+               mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
+                                       get_order(MIC_INT_DMA_BUF_SIZE));
+               mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
+                                         MIC_INT_DMA_BUF_SIZE);
        }
 
        snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
                 mvdev->virtio_id);
        mvdev->virtio_db = mic_next_db(mdev);
-       mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
-                       irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
+       mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
+                                              mic_virtio_intr_handler,
+                                              NULL, irqname, mvdev,
+                                              mvdev->virtio_db, MIC_INTR_DB);
        if (IS_ERR(mvdev->virtio_cookie)) {
                ret = PTR_ERR(mvdev->virtio_cookie);
                dev_dbg(mdev->sdev->parent, "request irq failed\n");
@@ -671,6 +777,11 @@ skip_hot_remove:
        vqconfig = mic_vq_config(mvdev->dd);
        for (i = 0; i < mvdev->dd->num_vq; i++) {
                struct mic_vringh *mvr = &mvdev->mvr[i];
+
+               mic_unmap_single(mvdev->mdev, mvr->buf_da,
+                                MIC_INT_DMA_BUF_SIZE);
+               free_pages((unsigned long)mvr->buf,
+                          get_order(MIC_INT_DMA_BUF_SIZE));
                vringh_kiov_cleanup(&mvr->riov);
                vringh_kiov_cleanup(&mvr->wiov);
                mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
index 184f3c8..d574efb 100644 (file)
  * @vrh: The host VRINGH used for accessing the card vrings.
  * @riov: The VRINGH read kernel IOV.
  * @wiov: The VRINGH write kernel IOV.
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
  * @vr_mutex: Mutex for synchronizing access to the VRING.
+ * @buf: Temporary kernel buffer used to copy in/out data
+ * from/to the card via DMA.
+ * @buf_da: dma address of buf.
  * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
  */
 struct mic_vringh {
        struct mic_vring vring;
        struct vringh vrh;
        struct vringh_kiov riov;
        struct vringh_kiov wiov;
-       u16 head;
        struct mutex vr_mutex;
+       void *buf;
+       dma_addr_t buf_da;
        struct mic_vdev *mvdev;
+       u16 head;
 };
 
 /**
@@ -69,6 +74,14 @@ struct mic_vringh {
  * @poll_wake - Used for waking up threads blocked in poll.
  * @out_bytes - Debug stats for number of bytes copied from host to card.
  * @in_bytes - Debug stats for number of bytes copied from card to host.
+ * @out_bytes_dma - Debug stats for number of bytes copied from host to card
+ * using DMA.
+ * @in_bytes_dma - Debug stats for number of bytes copied from card to host
+ * using DMA.
+ * @tx_len_unaligned - Debug stats for number of bytes copied to the card where
+ * the transfer length did not have the required DMA alignment.
+ * @tx_dst_unaligned - Debug stats for number of bytes copied where the
+ * destination address on the card did not have the required DMA alignment.
  * @mvr - Store per VRING data structures.
  * @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
  * @dd - Virtio device descriptor.
@@ -84,6 +97,10 @@ struct mic_vdev {
        int poll_wake;
        unsigned long out_bytes;
        unsigned long in_bytes;
+       unsigned long out_bytes_dma;
+       unsigned long in_bytes_dma;
+       unsigned long tx_len_unaligned;
+       unsigned long tx_dst_unaligned;
        struct mic_vringh mvr[MIC_MAX_VRINGS];
        struct work_struct virtio_bh_work;
        struct mic_device_desc *dd;
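For context, a minimal sketch (not part of this diff) of how the DMA debug counters added above might be bumped in the host-to-card copy path; dma_alignment, dst and len are hypothetical names standing in for the real copy-path variables:

	/* hypothetical accounting helper; dma_alignment, dst and len are
	 * assumptions standing in for the real copy-path variables */
	static void example_account_tx(struct mic_vdev *mvdev, dma_addr_t dst,
				       size_t len, size_t dma_alignment)
	{
		if (!IS_ALIGNED(len, dma_alignment))
			mvdev->tx_len_unaligned += len;
		else if (!IS_ALIGNED(dst, dma_alignment))
			mvdev->tx_dst_unaligned += len;
		else
			mvdev->out_bytes_dma += len;	/* copied via DMA */
	}
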
index 5562fdd..b7a21e1 100644 (file)
@@ -549,6 +549,13 @@ struct mic_smpt_ops mic_x100_smpt_ops = {
        .set = mic_x100_smpt_set,
 };
 
+static bool mic_x100_dma_filter(struct dma_chan *chan, void *param)
+{
+       if (chan->device->dev->parent == (struct device *)param)
+               return true;
+       return false;
+}
+
 struct mic_hw_ops mic_x100_ops = {
        .aper_bar = MIC_X100_APER_BAR,
        .mmio_bar = MIC_X100_MMIO_BAR,
@@ -563,6 +570,7 @@ struct mic_hw_ops mic_x100_ops = {
        .send_firmware_intr = mic_x100_send_firmware_intr,
        .load_mic_fw = mic_x100_load_firmware,
        .get_postcode = mic_x100_get_postcode,
+       .dma_filter = mic_x100_dma_filter,
 };
 
 struct mic_hw_intr_ops mic_x100_intr_ops = {
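The new dma_filter callback above has the standard dmaengine filter shape; a hedged usage sketch (not part of this diff; mdev and the DMA_MEMCPY capability are assumptions) of how the MIC host code might claim a channel with it:

	/* hypothetical channel request on the host side */
	static struct dma_chan *example_request_mic_chan(struct mic_device *mdev)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		/* the filter matches only channels whose parent is this adapter */
		return dma_request_channel(mask, mic_x100_dma_filter,
					   mdev->sdev->parent);
	}
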
index 9d3dbb2..21c2337 100644 (file)
@@ -244,7 +244,8 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
        if (version & 0x8000)
                maj_ver |= 0x0008;
 
-       sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);
+       sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts",
+               chip, maj_ver, min_ver);
 
        /* to be accessed later via sysfs entry */
        kim_gdata->version.full = version;
@@ -287,7 +288,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
        long len = 0;
        unsigned char *ptr = NULL;
        unsigned char *action_ptr = NULL;
-       unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
+       unsigned char bts_scr_name[40] = { 0 }; /* 40 char long bts scr name? */
        int wr_room_space;
        int cmd_size;
        unsigned long timeout;
@@ -778,7 +779,7 @@ static int kim_probe(struct platform_device *pdev)
        pr_info("sysfs entries created\n");
 
        kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
-       if (IS_ERR(kim_debugfs_dir)) {
+       if (!kim_debugfs_dir) {
                pr_err(" debugfs entries creation failed ");
                err = -EIO;
                goto err_debugfs_dir;
@@ -788,7 +789,6 @@ static int kim_probe(struct platform_device *pdev)
                                kim_gdata, &version_debugfs_fops);
        debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
                                kim_gdata, &list_debugfs_fops);
-       pr_info(" debugfs entries created ");
        return 0;
 
 err_debugfs_dir:
index 3250fc1..b3a8123 100644 (file)
@@ -130,7 +130,7 @@ static int vexpress_syscfg_write(void *context, unsigned int index,
        return vexpress_syscfg_exec(func, index, true, &val);
 }
 
-struct regmap_config vexpress_syscfg_regmap_config = {
+static struct regmap_config vexpress_syscfg_regmap_config = {
        .lock = vexpress_config_lock,
        .unlock = vexpress_config_unlock,
        .reg_bits = 32,
@@ -276,7 +276,7 @@ int vexpress_syscfg_device_register(struct platform_device *pdev)
 }
 
 
-int vexpress_syscfg_probe(struct platform_device *pdev)
+static int vexpress_syscfg_probe(struct platform_device *pdev)
 {
        struct vexpress_syscfg *syscfg;
        struct resource *res;
index e0d5017..248399a 100644 (file)
@@ -748,7 +748,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
        /* The rest are managed resources and will be freed by PCI core */
 }
 
-static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+static const struct pci_device_id vmci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
        { 0 },
 };
index 3f8e3db..d04c5ad 100644 (file)
@@ -582,7 +582,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
                        WARN_ONCE(pci_dev->current_state != prev,
                                "PCI PM: Device state not saved by %pF\n",
                                drv->suspend_late);
-                       return 0;
+                       goto Fixup;
                }
        }
 
@@ -591,6 +591,9 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
 
        pci_pm_set_unknown_state(pci_dev);
 
+Fixup:
+       pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
        return 0;
 }
 
@@ -734,7 +737,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        if (!pm) {
                pci_save_state(pci_dev);
-               return 0;
+               goto Fixup;
        }
 
        if (pm->suspend_noirq) {
@@ -751,7 +754,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
                        WARN_ONCE(pci_dev->current_state != prev,
                                "PCI PM: State of device not saved by %pF\n",
                                pm->suspend_noirq);
-                       return 0;
+                       goto Fixup;
                }
        }
 
@@ -775,6 +778,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
        if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
                pci_write_config_word(pci_dev, PCI_COMMAND, 0);
 
+Fixup:
+       pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
        return 0;
 }
 
@@ -999,8 +1005,10 @@ static int pci_pm_poweroff_noirq(struct device *dev)
        if (pci_has_legacy_pm_support(to_pci_dev(dev)))
                return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
 
-       if (!drv || !drv->pm)
+       if (!drv || !drv->pm) {
+               pci_fixup_device(pci_fixup_suspend_late, pci_dev);
                return 0;
+       }
 
        if (drv->pm->poweroff_noirq) {
                int error;
@@ -1021,6 +1029,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
        if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
                pci_write_config_word(pci_dev, PCI_COMMAND, 0);
 
+       pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
        if (pcibios_pm_ops.poweroff_noirq)
                return pcibios_pm_ops.poweroff_noirq(dev);
 
index ad56682..80c2d01 100644 (file)
@@ -2986,6 +2986,103 @@ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
                         quirk_broken_intx_masking);
 
+#ifdef CONFIG_ACPI
+/*
+ * Apple: Shutdown Cactus Ridge Thunderbolt controller.
+ *
+ * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
+ * shutdown before suspend. Otherwise the native host interface (NHI) will not
+ * be present after resume if a device was plugged in before suspend.
+ *
+ * The thunderbolt controller consists of a pcie switch with downstream
+ * bridges leading to the NHI and to the tunnel pci bridges.
+ *
+ * This quirk cuts power to the whole chip. Therefore we have to apply it
+ * during suspend_noirq of the upstream bridge.
+ *
+ * Power is automagically restored before resume. No action is needed.
+ */
+static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
+{
+       acpi_handle bridge, SXIO, SXFP, SXLV;
+
+       if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+               return;
+       if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
+               return;
+       bridge = ACPI_HANDLE(&dev->dev);
+       if (!bridge)
+               return;
+       /*
+        * SXIO and SXLV are present only on machines requiring this quirk.
+        * TB bridges in external devices might have the same device id as those
+        * on the host, but they will not have the associated ACPI methods. This
+        * implicitly checks that we are at the right bridge.
+        */
+       if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
+           || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
+           || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
+               return;
+       dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");
+
+       /* magic sequence */
+       acpi_execute_simple_method(SXIO, NULL, 1);
+       acpi_execute_simple_method(SXFP, NULL, 0);
+       msleep(300);
+       acpi_execute_simple_method(SXLV, NULL, 0);
+       acpi_execute_simple_method(SXIO, NULL, 0);
+       acpi_execute_simple_method(SXLV, NULL, 0);
+}
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+                              quirk_apple_poweroff_thunderbolt);
+
+/*
+ * Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
+ *
+ * During suspend the thunderbolt controller is reset and all pci
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. We have to manually wait for the NHI since there is
+ * no parent child relationship between the NHI and the tunneled
+ * bridges.
+ */
+static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
+{
+       struct pci_dev *sibling = NULL;
+       struct pci_dev *nhi = NULL;
+
+       if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+               return;
+       if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
+               return;
+       /*
+        * Find the NHI and confirm that we are a bridge on the tb host
+        * controller and not on a tb endpoint.
+        */
+       sibling = pci_get_slot(dev->bus, 0x0);
+       if (sibling == dev)
+               goto out; /* we are the downstream bridge to the NHI */
+       if (!sibling || !sibling->subordinate)
+               goto out;
+       nhi = pci_get_slot(sibling->subordinate, 0x0);
+       if (!nhi)
+               goto out;
+       if (nhi->vendor != PCI_VENDOR_ID_INTEL
+                       || (nhi->device != 0x1547 && nhi->device != 0x156c)
+                       || nhi->subsystem_vendor != 0x2222
+                       || nhi->subsystem_device != 0x1111)
+               goto out;
+       dev_info(&dev->dev, "quirk: wating for thunderbolt to reestablish pci tunnels...\n");
+       device_pm_wait_for_dev(&dev->dev, &nhi->dev);
+out:
+       pci_dev_put(nhi);
+       pci_dev_put(sibling);
+}
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+                              quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+                              quirk_apple_wait_for_thunderbolt);
+#endif
+
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
                          struct pci_fixup *end)
 {
@@ -3018,6 +3115,8 @@ extern struct pci_fixup __start_pci_fixups_resume_early[];
 extern struct pci_fixup __end_pci_fixups_resume_early[];
 extern struct pci_fixup __start_pci_fixups_suspend[];
 extern struct pci_fixup __end_pci_fixups_suspend[];
+extern struct pci_fixup __start_pci_fixups_suspend_late[];
+extern struct pci_fixup __end_pci_fixups_suspend_late[];
 
 static bool pci_apply_fixup_final_quirks;
 
@@ -3063,6 +3162,11 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
                end = __end_pci_fixups_suspend;
                break;
 
+       case pci_fixup_suspend_late:
+               start = __start_pci_fixups_suspend_late;
+               end = __end_pci_fixups_suspend_late;
+               break;
+
        default:
                /* stupid compiler warning, you would think with an enum... */
                return;
index 0c657d6..51cf808 100644 (file)
@@ -202,6 +202,7 @@ config PCMCIA_SA1111
        depends on ARM && SA1111 && PCMCIA
        select PCMCIA_SOC_COMMON
        select PCMCIA_SA11XX_BASE if ARCH_SA1100
+       select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111
        help
          Say Y  here to include support for SA1111-based PCMCIA or CF
          sockets, found on the Jornada 720, Graphicsmaster and other
@@ -217,7 +218,6 @@ config PCMCIA_PXA2XX
                    || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
                    || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
                    || MACH_COLIBRI320 || MACH_H4700)
-       select PCMCIA_SA1111 if ARCH_LUBBOCK && SA1111
        select PCMCIA_SOC_COMMON
        help
          Say Y here to include support for the PXA2xx PCMCIA controller
index 7745b51..fd55a69 100644 (file)
@@ -49,6 +49,7 @@ sa1100_cs-y                                   += sa1100_generic.o
 sa1100_cs-$(CONFIG_SA1100_ASSABET)             += sa1100_assabet.o
 sa1100_cs-$(CONFIG_SA1100_CERF)                        += sa1100_cerf.o
 sa1100_cs-$(CONFIG_SA1100_COLLIE)              += pxa2xx_sharpsl.o
+sa1100_cs-$(CONFIG_SA1100_H3100)               += sa1100_h3600.o
 sa1100_cs-$(CONFIG_SA1100_H3600)               += sa1100_h3600.o
 sa1100_cs-$(CONFIG_SA1100_NANOENGINE)          += sa1100_nanoengine.o
 sa1100_cs-$(CONFIG_SA1100_SHANNON)             += sa1100_shannon.o
index 0c6aac1..0802e0b 100644 (file)
@@ -475,7 +475,7 @@ static void bcm63xx_cb_exit(struct pci_dev *dev)
        bcm63xx_cb_dev = NULL;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(bcm63xx_cb_table) = {
+static const struct pci_device_id bcm63xx_cb_table[] = {
        {
                .vendor         = PCI_VENDOR_ID_BROADCOM,
                .device         = BCM6348_CPU_ID,
index 7d47456..aae7e6d 100644 (file)
@@ -25,7 +25,7 @@
 MODULE_LICENSE("GPL");
 
 /* PCI core routines */
-static DEFINE_PCI_DEVICE_TABLE(i82092aa_pci_ids) = {
+static const struct pci_device_id i82092aa_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0) },
        { }
 };
index 622dd6f..34ace48 100644 (file)
@@ -764,7 +764,7 @@ static void pd6729_pci_remove(struct pci_dev *dev)
        kfree(socket);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(pd6729_pci_ids) = {
+static const struct pci_device_id pd6729_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6729) },
        { }
 };
index 3baa3ef..40e0403 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
@@ -94,6 +95,7 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 int pcmcia_jornada720_init(struct device *dev)
 {
        int ret = -ENODEV;
+       struct sa1111_dev *sadev = SA1111_DEV(dev);
 
        if (machine_is_jornada720()) {
                unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
@@ -101,12 +103,12 @@ int pcmcia_jornada720_init(struct device *dev)
                GRER |= 0x00000002;
 
                /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
-               sa1111_set_io_dir(dev, pin, 0, 0);
-               sa1111_set_io(dev, pin, 0);
-               sa1111_set_sleep_io(dev, pin, 0);
+               sa1111_set_io_dir(sadev, pin, 0, 0);
+               sa1111_set_io(sadev, pin, 0);
+               sa1111_set_sleep_io(sadev, pin, 0);
 
                sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
-               ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops,
+               ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
                                sa11xx_drv_pcmcia_add_one);
        }
 
index d926920..9fb0c3a 100644 (file)
@@ -563,7 +563,7 @@ static int vrc4173_cardu_setup(char *options)
 
 __setup("vrc4173_cardu=", vrc4173_cardu_setup);
 
-static DEFINE_PCI_DEVICE_TABLE(vrc4173_cardu_id_table) = {
+static const struct pci_device_id vrc4173_cardu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
         {0, }
 };
index 946f90e..8a23ccb 100644 (file)
@@ -1352,7 +1352,7 @@ static const struct dev_pm_ops yenta_pm_ops = {
                .driver_data    = CARDBUS_TYPE_##type,  \
        }
 
-static DEFINE_PCI_DEVICE_TABLE(yenta_table) = {
+static const struct pci_device_id yenta_table[] = {
        CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
 
        /*
index 653a58b..c67ff05 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mfd/max77693.h>
 #include <linux/mfd/max77693-private.h>
 #include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
 
 #define CHGIN_ILIM_STEP_20mA                   20000
 
@@ -39,9 +40,9 @@
 static int max77693_chg_is_enabled(struct regulator_dev *rdev)
 {
        int ret;
-       u8 val;
+       unsigned int val;
 
-       ret = max77693_read_reg(rdev->regmap, rdev->desc->enable_reg, &val);
+       ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
        if (ret)
                return ret;
 
@@ -57,12 +58,11 @@ static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
 {
        unsigned int chg_min_uA = rdev->constraints->min_uA;
        unsigned int chg_max_uA = rdev->constraints->max_uA;
-       u8 reg, sel;
+       unsigned int reg, sel;
        unsigned int val;
        int ret;
 
-       ret = max77693_read_reg(rdev->regmap,
-                               MAX77693_CHG_REG_CHG_CNFG_09, &reg);
+       ret = regmap_read(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, &reg);
        if (ret < 0)
                return ret;
 
@@ -96,7 +96,7 @@ static int max77693_chg_set_current_limit(struct regulator_dev *rdev,
        /* the first four codes for charger current are all 60mA */
        sel += 3;
 
-       return max77693_write_reg(rdev->regmap,
+       return regmap_write(rdev->regmap,
                                MAX77693_CHG_REG_CHG_CNFG_09, sel);
 }
 /* end of CHARGER regulator ops */
index 3b57807..1d92f51 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/spmi.h>
-#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <dt-bindings/spmi/spmi.h>
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644 (file)
index 0000000..c121acc
--- /dev/null
@@ -0,0 +1,13 @@
+menuconfig THUNDERBOLT
+       tristate "Thunderbolt support for Apple devices"
+       depends on PCI
+       select CRC32
+       help
+         Cactus Ridge Thunderbolt Controller driver.
+         This driver is required if you want to hotplug Thunderbolt devices on
+         Apple hardware.
+
+         Device chaining is currently not supported.
+
+         To compile this driver as a module, choose M here. The module will be
+         called thunderbolt.
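For reference, a minimal sketch of the kernel configuration fragment for building the new driver as a module (the option name is taken from the entry above; PCI and CRC32 are shown for its stated dependencies):

	CONFIG_PCI=y
	CONFIG_CRC32=y
	CONFIG_THUNDERBOLT=m
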
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644 (file)
index 0000000..5d1053c
--- /dev/null
@@ -0,0 +1,3 @@
+obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
+
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
new file mode 100644 (file)
index 0000000..a7b47e7
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Thunderbolt Cactus Ridge driver - capabilities lookup
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include "tb.h"
+
+
+struct tb_cap_any {
+       union {
+               struct tb_cap_basic basic;
+               struct tb_cap_extended_short extended_short;
+               struct tb_cap_extended_long extended_long;
+       };
+} __packed;
+
+static bool tb_cap_is_basic(struct tb_cap_any *cap)
+{
+       /* basic.cap is u8. This checks only the lower 8 bits of cap. */
+       return cap->basic.cap != 5;
+}
+
+static bool tb_cap_is_long(struct tb_cap_any *cap)
+{
+       return !tb_cap_is_basic(cap)
+              && cap->extended_short.next == 0
+              && cap->extended_short.length == 0;
+}
+
+static enum tb_cap tb_cap(struct tb_cap_any *cap)
+{
+       if (tb_cap_is_basic(cap))
+               return cap->basic.cap;
+       else
+               /* extended_short/long have cap at the same offset. */
+               return cap->extended_short.cap;
+}
+
+static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
+{
+       int next;
+       if (offset == 1) {
+               /*
+                * The first pointer is part of the switch header and always
+                * a simple pointer.
+                */
+               next = cap->basic.next;
+       } else {
+               /*
+                * Somehow Intel decided to use 3 different types of capability
+                * headers. It is not like anyone could have predicted that
+                * single byte offsets are not enough...
+                */
+               if (tb_cap_is_basic(cap))
+                       next = cap->basic.next;
+               else if (!tb_cap_is_long(cap))
+                       next = cap->extended_short.next;
+               else
+                       next = cap->extended_long.next;
+       }
+       /*
+        * "Hey, we could terminate some capability lists with a null offset
+        *  and others with a pointer to the last element." - "Great idea!"
+        */
+       if (next == offset)
+               return 0;
+       return next;
+}
+
+/**
+ * tb_find_cap() - find a capability
+ *
+ * Return: Returns a positive offset if the capability was found and 0 if not.
+ * Returns an error code on failure.
+ */
+int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
+{
+       u32 offset = 1;
+       struct tb_cap_any header;
+       int res;
+       int retries = 10;
+       while (retries--) {
+               res = tb_port_read(port, &header, space, offset, 1);
+               if (res) {
+                       /* Intel needs some help with linked lists. */
+                       if (space == TB_CFG_PORT && offset == 0xa
+                           && port->config.type == TB_TYPE_DP_HDMI_OUT) {
+                               offset = 0x39;
+                               continue;
+                       }
+                       return res;
+               }
+               if (offset != 1) {
+                       if (tb_cap(&header) == cap)
+                               return offset;
+                       if (tb_cap_is_long(&header)) {
+                               /* tb_cap_extended_long is 2 dwords */
+                               res = tb_port_read(port, &header, space,
+                                                  offset, 2);
+                               if (res)
+                                       return res;
+                       }
+               }
+               offset = tb_cap_next(&header, offset);
+               if (!offset)
+                       return 0;
+       }
+       tb_port_WARN(port,
+                    "run out of retries while looking for cap %#x in config space %d, last offset: %#x\n",
+                    cap, space, offset);
+       return -EIO;
+}
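A hedged usage sketch of tb_find_cap() (not part of this file): TB_CAP_PLUG_EVENTS and the sw->cap_plug_events field are assumed to be defined elsewhere in this driver series.

	/* hypothetical helper that caches the plug-events capability offset */
	static int example_cache_plug_events_cap(struct tb_switch *sw)
	{
		int cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
		if (cap < 0)
			return cap;		/* config space read failed */
		if (cap == 0)
			tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS capability found\n");
		else
			sw->cap_plug_events = cap;
		return 0;
	}
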
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
new file mode 100644 (file)
index 0000000..799634b
--- /dev/null
@@ -0,0 +1,731 @@
+/*
+ * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+
+#include "ctl.h"
+
+
+struct ctl_pkg {
+       struct tb_ctl *ctl;
+       void *buffer;
+       struct ring_frame frame;
+};
+
+#define TB_CTL_RX_PKG_COUNT 10
+
+/**
+ * struct tb_ctl - thunderbolt control channel
+ */
+struct tb_ctl {
+       struct tb_nhi *nhi;
+       struct tb_ring *tx;
+       struct tb_ring *rx;
+
+       struct dma_pool *frame_pool;
+       struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
+       DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
+       struct completion response_ready;
+
+       hotplug_cb callback;
+       void *callback_data;
+};
+
+
+#define tb_ctl_WARN(ctl, format, arg...) \
+       dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_err(ctl, format, arg...) \
+       dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_warn(ctl, format, arg...) \
+       dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_info(ctl, format, arg...) \
+       dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+
+/* configuration packets definitions */
+
+enum tb_cfg_pkg_type {
+       TB_CFG_PKG_READ = 1,
+       TB_CFG_PKG_WRITE = 2,
+       TB_CFG_PKG_ERROR = 3,
+       TB_CFG_PKG_NOTIFY_ACK = 4,
+       TB_CFG_PKG_EVENT = 5,
+       TB_CFG_PKG_XDOMAIN_REQ = 6,
+       TB_CFG_PKG_XDOMAIN_RESP = 7,
+       TB_CFG_PKG_OVERRIDE = 8,
+       TB_CFG_PKG_RESET = 9,
+       TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
+};
+
+/* common header */
+struct tb_cfg_header {
+       u32 route_hi:22;
+       u32 unknown:10; /* highest order bit is set on replies */
+       u32 route_lo;
+} __packed;
+
+/* additional header for read/write packets */
+struct tb_cfg_address {
+       u32 offset:13; /* in dwords */
+       u32 length:6; /* in dwords */
+       u32 port:6;
+       enum tb_cfg_space space:2;
+       u32 seq:2; /* sequence number  */
+       u32 zero:3;
+} __packed;
+
+/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
+struct cfg_read_pkg {
+       struct tb_cfg_header header;
+       struct tb_cfg_address addr;
+} __packed;
+
+/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
+struct cfg_write_pkg {
+       struct tb_cfg_header header;
+       struct tb_cfg_address addr;
+       u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* TB_CFG_PKG_ERROR */
+struct cfg_error_pkg {
+       struct tb_cfg_header header;
+       enum tb_cfg_error error:4;
+       u32 zero1:4;
+       u32 port:6;
+       u32 zero2:2; /* Both should be zero, still they are different fields. */
+       u32 zero3:16;
+} __packed;
+
+/* TB_CFG_PKG_EVENT */
+struct cfg_event_pkg {
+       struct tb_cfg_header header;
+       u32 port:6;
+       u32 zero:25;
+       bool unplug:1;
+} __packed;
+
+/* TB_CFG_PKG_RESET */
+struct cfg_reset_pkg {
+       struct tb_cfg_header header;
+} __packed;
+
+/* TB_CFG_PKG_PREPARE_TO_SLEEP */
+struct cfg_pts_pkg {
+       struct tb_cfg_header header;
+       u32 data;
+} __packed;
+
+
+/* utility functions */
+
+static u64 get_route(struct tb_cfg_header header)
+{
+       return (u64) header.route_hi << 32 | header.route_lo;
+}
+
+static struct tb_cfg_header make_header(u64 route)
+{
+       struct tb_cfg_header header = {
+               .route_hi = route >> 32,
+               .route_lo = route,
+       };
+       /* check for overflow, route_hi is not 32 bits! */
+       WARN_ON(get_route(header) != route);
+       return header;
+}
+
+static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
+                       u64 route)
+{
+       struct tb_cfg_header *header = pkg->buffer;
+
+       /* check frame, TODO: frame flags */
+       if (WARN(len != pkg->frame.size,
+                       "wrong framesize (expected %#x, got %#x)\n",
+                       len, pkg->frame.size))
+               return -EIO;
+       if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
+                       type, pkg->frame.eof))
+               return -EIO;
+       if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
+                       pkg->frame.sof))
+               return -EIO;
+
+       /* check header */
+       if (WARN(header->unknown != 1 << 9,
+                       "header->unknown is %#x\n", header->unknown))
+               return -EIO;
+       if (WARN(route != get_route(*header),
+                       "wrong route (expected %llx, got %llx)",
+                       route, get_route(*header)))
+               return -EIO;
+       return 0;
+}
+
+static int check_config_address(struct tb_cfg_address addr,
+                               enum tb_cfg_space space, u32 offset,
+                               u32 length)
+{
+       if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
+               return -EIO;
+       if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
+                       space, addr.space))
+               return -EIO;
+       if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
+                       offset, addr.offset))
+               return -EIO;
+       if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
+                       length, addr.length))
+               return -EIO;
+       if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
+               return -EIO;
+       /*
+        * We cannot check addr->port as it is set to the upstream port of the
+        * sender.
+        */
+       return 0;
+}
+
+static struct tb_cfg_result decode_error(struct ctl_pkg *response)
+{
+       struct cfg_error_pkg *pkg = response->buffer;
+       struct tb_cfg_result res = { 0 };
+       res.response_route = get_route(pkg->header);
+       res.response_port = 0;
+       res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
+                              get_route(pkg->header));
+       if (res.err)
+               return res;
+
+       WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
+       WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
+       WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
+       res.err = 1;
+       res.tb_error = pkg->error;
+       res.response_port = pkg->port;
+       return res;
+
+}
+
+static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
+                                        enum tb_cfg_pkg_type type, u64 route)
+{
+       struct tb_cfg_header *header = pkg->buffer;
+       struct tb_cfg_result res = { 0 };
+
+       if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+               return decode_error(pkg);
+
+       res.response_port = 0; /* will be updated later for cfg_read/write */
+       res.response_route = get_route(*header);
+       res.err = check_header(pkg, len, type, route);
+       return res;
+}
+
+static void tb_cfg_print_error(struct tb_ctl *ctl,
+                              const struct tb_cfg_result *res)
+{
+       WARN_ON(res->err != 1);
+       switch (res->tb_error) {
+       case TB_CFG_ERROR_PORT_NOT_CONNECTED:
+               /* Port is not connected. This can happen during surprise
+                * removal. Do not warn. */
+               return;
+       case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
+               /*
+                * Invalid cfg_space/offset/length combination in
+                * cfg_read/cfg_write.
+                */
+               tb_ctl_WARN(ctl,
+                       "CFG_ERROR(%llx:%x): Invalid config space of offset\n",
+                       res->response_route, res->response_port);
+               return;
+       case TB_CFG_ERROR_NO_SUCH_PORT:
+               /*
+                * - The route contains a non-existent port.
+                * - The route contains a non-PHY port (e.g. PCIe).
+                * - The port in cfg_read/cfg_write does not exist.
+                */
+               tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
+                       res->response_route, res->response_port);
+               return;
+       case TB_CFG_ERROR_LOOP:
+               tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
+                       res->response_route, res->response_port);
+               return;
+       default:
+               /* 5,6,7,9 and 11 are also valid error codes */
+               tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
+                       res->response_route, res->response_port);
+               return;
+       }
+}
+
+static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
+{
+       int i;
+       for (i = 0; i < len; i++)
+               dst[i] = cpu_to_be32(src[i]);
+}
+
+static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
+{
+       int i;
+       for (i = 0; i < len; i++)
+               dst[i] = be32_to_cpu(src[i]);
+}
+
+static __be32 tb_crc(void *data, size_t len)
+{
+       return cpu_to_be32(~__crc32c_le(~0, data, len));
+}
+
+static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
+{
+       if (pkg) {
+               dma_pool_free(pkg->ctl->frame_pool,
+                             pkg->buffer, pkg->frame.buffer_phy);
+               kfree(pkg);
+       }
+}
+
+static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
+{
+       struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
+       if (!pkg)
+               return NULL;
+       pkg->ctl = ctl;
+       pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
+                                    &pkg->frame.buffer_phy);
+       if (!pkg->buffer) {
+               kfree(pkg);
+               return NULL;
+       }
+       return pkg;
+}
+
+
+/* RX/TX handling */
+
+static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
+                              bool canceled)
+{
+       struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+       tb_ctl_pkg_free(pkg);
+}
+
+/**
+ * tb_ctl_tx() - transmit a packet on the control channel
+ *
+ * len must be a multiple of four.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
+                    enum tb_cfg_pkg_type type)
+{
+       int res;
+       struct ctl_pkg *pkg;
+       if (len % 4 != 0) { /* required for le->be conversion */
+               tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
+               return -EINVAL;
+       }
+       if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
+               tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
+                           len, TB_FRAME_SIZE - 4);
+               return -EINVAL;
+       }
+       pkg = tb_ctl_pkg_alloc(ctl);
+       if (!pkg)
+               return -ENOMEM;
+       pkg->frame.callback = tb_ctl_tx_callback;
+       pkg->frame.size = len + 4;
+       pkg->frame.sof = type;
+       pkg->frame.eof = type;
+       cpu_to_be32_array(pkg->buffer, data, len / 4);
+       *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
+
+       res = ring_tx(ctl->tx, &pkg->frame);
+       if (res) /* ring is stopped */
+               tb_ctl_pkg_free(pkg);
+       return res;
+}
+
+/**
+ * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
+ */
+static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
+                                    struct ctl_pkg *response)
+{
+       struct cfg_event_pkg *pkg = response->buffer;
+       u64 route = get_route(pkg->header);
+
+       if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
+               tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
+               return;
+       }
+
+       if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
+               tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
+                           route, pkg->port);
+       WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
+       ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
+}
+
+static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
+{
+       ring_rx(pkg->ctl->rx, &pkg->frame); /*
+                                            * We ignore failures during stop.
+                                            * All rx packets are referenced
+                                            * from ctl->rx_packets, so we do
+                                            * not lose them.
+                                            */
+}
+
+static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
+                              bool canceled)
+{
+       struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+
+       if (canceled)
+               return; /*
+                        * ring is stopped, packet is referenced from
+                        * ctl->rx_packets.
+                        */
+
+       if (frame->size < 4 || frame->size % 4 != 0) {
+               tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
+                          frame->size);
+               goto rx;
+       }
+
+       frame->size -= 4; /* remove checksum */
+       if (*(__be32 *) (pkg->buffer + frame->size)
+                       != tb_crc(pkg->buffer, frame->size)) {
+               tb_ctl_err(pkg->ctl,
+                          "RX: checksum mismatch, dropping packet\n");
+               goto rx;
+       }
+       be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
+
+       if (frame->eof == TB_CFG_PKG_EVENT) {
+               tb_ctl_handle_plug_event(pkg->ctl, pkg);
+               goto rx;
+       }
+       if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
+               tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
+               goto rx;
+       }
+       complete(&pkg->ctl->response_ready);
+       return;
+rx:
+       tb_ctl_rx_submit(pkg);
+}
+
+/**
+ * tb_ctl_rx() - receive a packet from the control channel
+ */
+static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
+                                     size_t length, int timeout_msec,
+                                     u64 route, enum tb_cfg_pkg_type type)
+{
+       struct tb_cfg_result res;
+       struct ctl_pkg *pkg;
+
+       if (!wait_for_completion_timeout(&ctl->response_ready,
+                                        msecs_to_jiffies(timeout_msec))) {
+               tb_ctl_WARN(ctl, "RX: timeout\n");
+               return (struct tb_cfg_result) { .err = -ETIMEDOUT };
+       }
+       if (!kfifo_get(&ctl->response_fifo, &pkg)) {
+               tb_ctl_WARN(ctl, "empty kfifo\n");
+               return (struct tb_cfg_result) { .err = -EIO };
+       }
+
+       res = parse_header(pkg, length, type, route);
+       if (!res.err)
+               memcpy(buffer, pkg->buffer, length);
+       tb_ctl_rx_submit(pkg);
+       return res;
+}
+
+
+/* public interface, alloc/start/stop/free */
+
+/**
+ * tb_ctl_alloc() - allocate a control channel
+ *
+ * cb will be invoked once for every hot plug event.
+ *
+ * Return: Returns a pointer on success or NULL on failure.
+ */
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
+{
+       int i;
+       struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+       if (!ctl)
+               return NULL;
+       ctl->nhi = nhi;
+       ctl->callback = cb;
+       ctl->callback_data = cb_data;
+
+       init_completion(&ctl->response_ready);
+       INIT_KFIFO(ctl->response_fifo);
+       ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
+                                        TB_FRAME_SIZE, 4, 0);
+       if (!ctl->frame_pool)
+               goto err;
+
+       ctl->tx = ring_alloc_tx(nhi, 0, 10);
+       if (!ctl->tx)
+               goto err;
+
+       ctl->rx = ring_alloc_rx(nhi, 0, 10);
+       if (!ctl->rx)
+               goto err;
+
+       for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
+               ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
+               if (!ctl->rx_packets[i])
+                       goto err;
+               ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
+       }
+
+       tb_ctl_info(ctl, "control channel created\n");
+       return ctl;
+err:
+       tb_ctl_free(ctl);
+       return NULL;
+}
+
+/**
+ * tb_ctl_free() - free a control channel
+ *
+ * Must be called after tb_ctl_stop.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_free(struct tb_ctl *ctl)
+{
+       int i;
+       if (ctl->rx)
+               ring_free(ctl->rx);
+       if (ctl->tx)
+               ring_free(ctl->tx);
+
+       /* free RX packets */
+       for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+               tb_ctl_pkg_free(ctl->rx_packets[i]);
+
+
+       if (ctl->frame_pool)
+               dma_pool_destroy(ctl->frame_pool);
+       kfree(ctl);
+}
+
+/**
+ * tb_cfg_start() - start/resume the control channel
+ */
+void tb_ctl_start(struct tb_ctl *ctl)
+{
+       int i;
+       tb_ctl_info(ctl, "control channel starting...\n");
+       ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
+       ring_start(ctl->rx);
+       for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+               tb_ctl_rx_submit(ctl->rx_packets[i]);
+}
+
+/**
+ * tb_ctl_stop() - pause the control channel
+ *
+ * All invocations of ctl->callback will have finished after this method
+ * returns.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_stop(struct tb_ctl *ctl)
+{
+       ring_stop(ctl->rx);
+       ring_stop(ctl->tx);
+
+       if (!kfifo_is_empty(&ctl->response_fifo))
+               tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
+       kfifo_reset(&ctl->response_fifo);
+       tb_ctl_info(ctl, "control channel stopped\n");
+}
+
+/* public interface, commands */
+
+/**
+ * tb_cfg_error() - send error packet
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
+                enum tb_cfg_error error)
+{
+       struct cfg_error_pkg pkg = {
+               .header = make_header(route),
+               .port = port,
+               .error = error,
+       };
+       tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
+       return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
+}
+
+/**
+ * tb_cfg_reset() - send a reset packet and wait for a response
+ *
+ * If the switch at route is incorrectly configured then we will not receive a
+ * reply (even though the switch will reset). The caller should check for
+ * -ETIMEDOUT and attempt to reconfigure the switch.
+ */
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+                                 int timeout_msec)
+{
+       int err;
+       struct cfg_reset_pkg request = { .header = make_header(route) };
+       struct tb_cfg_header reply;
+
+       err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
+       if (err)
+               return (struct tb_cfg_result) { .err = err };
+
+       return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
+                        TB_CFG_PKG_RESET);
+}
+
+/**
+ * tb_cfg_read_raw() - read from config space into buffer
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+               u64 route, u32 port, enum tb_cfg_space space,
+               u32 offset, u32 length, int timeout_msec)
+{
+       struct tb_cfg_result res = { 0 };
+       struct cfg_read_pkg request = {
+               .header = make_header(route),
+               .addr = {
+                       .port = port,
+                       .space = space,
+                       .offset = offset,
+                       .length = length,
+               },
+       };
+       struct cfg_write_pkg reply;
+
+       res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
+       if (res.err)
+               return res;
+
+       res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
+                       TB_CFG_PKG_READ);
+       if (res.err)
+               return res;
+
+       res.response_port = reply.addr.port;
+       res.err = check_config_address(reply.addr, space, offset, length);
+       if (!res.err)
+               memcpy(buffer, &reply.data, 4 * length);
+       return res;
+}
+
+/**
+ * tb_cfg_write_raw() - write from buffer into config space
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+               u64 route, u32 port, enum tb_cfg_space space,
+               u32 offset, u32 length, int timeout_msec)
+{
+       struct tb_cfg_result res = { 0 };
+       struct cfg_write_pkg request = {
+               .header = make_header(route),
+               .addr = {
+                       .port = port,
+                       .space = space,
+                       .offset = offset,
+                       .length = length,
+               },
+       };
+       struct cfg_read_pkg reply;
+
+       memcpy(&request.data, buffer, length * 4);
+
+       res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
+       if (res.err)
+               return res;
+
+       res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
+                       TB_CFG_PKG_WRITE);
+       if (res.err)
+               return res;
+
+       res.response_port = reply.addr.port;
+       res.err = check_config_address(reply.addr, space, offset, length);
+       return res;
+}
+
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+               enum tb_cfg_space space, u32 offset, u32 length)
+{
+       struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
+                       space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+       if (res.err == 1) {
+               tb_cfg_print_error(ctl, &res);
+               return -EIO;
+       }
+       WARN(res.err, "tb_cfg_read: %d\n", res.err);
+       return res.err;
+}
+
+int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+                enum tb_cfg_space space, u32 offset, u32 length)
+{
+       struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
+                       space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+       if (res.err == 1) {
+               tb_cfg_print_error(ctl, &res);
+               return -EIO;
+       }
+       WARN(res.err, "tb_cfg_write: %d\n", res.err);
+       return res.err;
+}
+
+/**
+ * tb_cfg_get_upstream_port() - get upstream port number of switch at route
+ *
+ * Reads the first dword from the switch's TB_CFG_SWITCH config area and
+ * returns the port number from which the reply originated.
+ *
+ * Return: Returns the upstream port number on success or an error code on
+ * failure.
+ */
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
+{
+       u32 dummy;
+       struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
+                                                  TB_CFG_SWITCH, 0, 1,
+                                                  TB_CFG_DEFAULT_TIMEOUT);
+       if (res.err == 1)
+               return -EIO;
+       if (res.err)
+               return res.err;
+       return res.response_port;
+}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
new file mode 100644 (file)
index 0000000..ba87d6e
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_CFG
+#define _TB_CFG
+
+#include "nhi.h"
+
+/* control channel */
+struct tb_ctl;
+
+typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
+
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
+void tb_ctl_start(struct tb_ctl *ctl);
+void tb_ctl_stop(struct tb_ctl *ctl);
+void tb_ctl_free(struct tb_ctl *ctl);
+
+/* configuration commands */
+
+#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
+
+enum tb_cfg_space {
+       TB_CFG_HOPS = 0,
+       TB_CFG_PORT = 1,
+       TB_CFG_SWITCH = 2,
+       TB_CFG_COUNTERS = 3,
+};
+
+enum tb_cfg_error {
+       TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+       TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+       TB_CFG_ERROR_NO_SUCH_PORT = 4,
+       TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
+       TB_CFG_ERROR_LOOP = 8,
+};
+
+struct tb_cfg_result {
+       u64 response_route;
+       u32 response_port; /*
+                           * If err = 1 then this is the port that sent the
+                           * error.
+                           * If err = 0 and if this was a cfg_read/write then
+                           * this is the upstream port of the responding
+                           * switch.
+                           * Otherwise the field is set to zero.
+                           */
+       int err; /* negative errors, 0 for success, 1 for tb errors */
+       enum tb_cfg_error tb_error; /* valid if err == 1 */
+};
+
+
+int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
+                enum tb_cfg_error error);
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+                                 int timeout_msec);
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+                                    u64 route, u32 port,
+                                    enum tb_cfg_space space, u32 offset,
+                                    u32 length, int timeout_msec);
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+                                     u64 route, u32 port,
+                                     enum tb_cfg_space space, u32 offset,
+                                     u32 length, int timeout_msec);
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+               enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+                enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
+
+
+#endif
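To tie the ctl.h interface together, a hedged lifecycle sketch (not part of this patch) of how a caller such as the domain code might drive the control channel; the example_* names and the domain pointer are assumptions:

	static void example_hotplug_cb(void *data, u64 route, u8 port, bool unplug)
	{
		/* defer real hotplug handling to a workqueue; tb_ctl_stop() and
		 * tb_ctl_free() must not be called from this callback */
	}

	static struct tb_ctl *example_ctl_setup(struct tb_nhi *nhi, void *domain)
	{
		struct tb_ctl *ctl = tb_ctl_alloc(nhi, example_hotplug_cb, domain);
		if (!ctl)
			return NULL;
		tb_ctl_start(ctl);
		/* tb_cfg_read()/tb_cfg_write() may now be issued against switches */
		return ctl;
	}

	static void example_ctl_teardown(struct tb_ctl *ctl)
	{
		tb_ctl_stop(ctl);	/* all callbacks have finished after this */
		tb_ctl_free(ctl);	/* only valid after tb_ctl_stop() */
	}
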
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
new file mode 100644 (file)
index 0000000..0dde34e
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Thunderbolt Cactus Ridge driver - eeprom access
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include "tb.h"
+
+/**
+ * tb_eeprom_ctl_write() - write control word
+ */
+static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+       return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+/**
+ * tb_eeprom_ctl_read() - read control word
+ */
+static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+       return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+enum tb_eeprom_transfer {
+       TB_EEPROM_IN,
+       TB_EEPROM_OUT,
+};
+
+/**
+ * tb_eeprom_active - enable rom access
+ *
+ * WARNING: Always disable access after usage. Otherwise the controller will
+ * fail to reprobe.
+ */
+static int tb_eeprom_active(struct tb_switch *sw, bool enable)
+{
+       struct tb_eeprom_ctl ctl;
+       int res = tb_eeprom_ctl_read(sw, &ctl);
+       if (res)
+               return res;
+       if (enable) {
+               ctl.access_high = 1;
+               res = tb_eeprom_ctl_write(sw, &ctl);
+               if (res)
+                       return res;
+               ctl.access_low = 0;
+               return tb_eeprom_ctl_write(sw, &ctl);
+       } else {
+               ctl.access_low = 1;
+               res = tb_eeprom_ctl_write(sw, &ctl);
+               if (res)
+                       return res;
+               ctl.access_high = 0;
+               return tb_eeprom_ctl_write(sw, &ctl);
+       }
+}
+
+/**
+ * tb_eeprom_transfer - transfer one bit
+ *
+ * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
+ * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
+ */
+static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
+                             enum tb_eeprom_transfer direction)
+{
+       int res;
+       if (direction == TB_EEPROM_OUT) {
+               res = tb_eeprom_ctl_write(sw, ctl);
+               if (res)
+                       return res;
+       }
+       ctl->clock = 1;
+       res = tb_eeprom_ctl_write(sw, ctl);
+       if (res)
+               return res;
+       if (direction == TB_EEPROM_IN) {
+               res = tb_eeprom_ctl_read(sw, ctl);
+               if (res)
+                       return res;
+       }
+       ctl->clock = 0;
+       return tb_eeprom_ctl_write(sw, ctl);
+}
+
+/**
+ * tb_eeprom_out - write one byte to the bus
+ */
+static int tb_eeprom_out(struct tb_switch *sw, u8 val)
+{
+       struct tb_eeprom_ctl ctl;
+       int i;
+       int res = tb_eeprom_ctl_read(sw, &ctl);
+       if (res)
+               return res;
+       for (i = 0; i < 8; i++) {
+               ctl.data_out = val & 0x80;
+               res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
+               if (res)
+                       return res;
+               val <<= 1;
+       }
+       return 0;
+}
+
+/**
+ * tb_eeprom_in - read one byte from the bus
+ */
+static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
+{
+       struct tb_eeprom_ctl ctl;
+       int i;
+       int res = tb_eeprom_ctl_read(sw, &ctl);
+       if (res)
+               return res;
+       *val = 0;
+       for (i = 0; i < 8; i++) {
+               *val <<= 1;
+               res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
+               if (res)
+                       return res;
+               *val |= ctl.data_in;
+       }
+       return 0;
+}
+
+/**
+ * tb_eeprom_read_n - read count bytes from offset into val
+ */
+static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+               size_t count)
+{
+       int i, res;
+       res = tb_eeprom_active(sw, true);
+       if (res)
+               return res;
+       /* 0x3 is the standard SPI EEPROM READ opcode */
+       res = tb_eeprom_out(sw, 3);
+       if (res)
+               return res;
+       res = tb_eeprom_out(sw, offset >> 8);
+       if (res)
+               return res;
+       res = tb_eeprom_out(sw, offset);
+       if (res)
+               return res;
+       for (i = 0; i < count; i++) {
+               res = tb_eeprom_in(sw, val + i);
+               if (res)
+                       return res;
+       }
+       return tb_eeprom_active(sw, false);
+}
+
+static u8 tb_crc8(u8 *data, int len)
+{
+       int i, j;
+       u8 val = 0xff;
+       for (i = 0; i < len; i++) {
+               val ^= data[i];
+               for (j = 0; j < 8; j++)
+                       val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
+       }
+       return val;
+}
+
+static u32 tb_crc32(void *data, size_t len)
+{
+       return ~__crc32c_le(~0, data, len);
+}
+
+#define TB_DROM_DATA_START 13
+struct tb_drom_header {
+       /* BYTE 0 */
+       u8 uid_crc8; /* checksum for uid */
+       /* BYTES 1-8 */
+       u64 uid;
+       /* BYTES 9-12 */
+       u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
+       /* BYTE 13 */
+       u8 device_rom_revision; /* should be <= 1 */
+       u16 data_len:10;
+       u8 __unknown1:6;
+       /* BYTES 16-21 */
+       u16 vendor_id;
+       u16 model_id;
+       u8 model_rev;
+       u8 eeprom_rev;
+} __packed;
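
As an aside, a raw DROM dump can be sanity-checked against this layout from userspace; the sketch below mirrors tb_crc8() (polynomial 0x07, initial value 0xff) and the byte offsets documented in the struct comments. It is an illustration only, and it assumes the little-endian byte order that tb_drom_read() below relies on when it reads data_len from bytes 14-15.

    #include <stdint.h>
    #include <stdio.h>

    /* Same bitwise CRC-8 as tb_crc8(): polynomial 0x07, initial value 0xff. */
    static uint8_t drom_crc8(const uint8_t *data, int len)
    {
            uint8_t val = 0xff;
            int i, j;

            for (i = 0; i < len; i++) {
                    val ^= data[i];
                    for (j = 0; j < 8; j++)
                            val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
            }
            return val;
    }

    /* Verify the uid checksum and report the declared entry-data length. */
    static int drom_check_header(const uint8_t *drom, size_t len)
    {
            uint16_t data_len;

            if (len < 22) /* header covers bytes 0-21 */
                    return -1;
            if (drom_crc8(drom + 1, 8) != drom[0])
                    return -1; /* uid crc8 mismatch */
            data_len = (drom[14] | (drom[15] << 8)) & 0x3ff; /* bytes 14-15, 10 bits */
            printf("uid crc8 ok, data_len=%u\n", (unsigned)data_len);
            return 0;
    }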
+
+enum tb_drom_entry_type {
+       /* force unsigned to prevent "one-bit signed bitfield" warning */
+       TB_DROM_ENTRY_GENERIC = 0U,
+       TB_DROM_ENTRY_PORT,
+};
+
+struct tb_drom_entry_header {
+       u8 len;
+       u8 index:6;
+       bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
+       enum tb_drom_entry_type type:1;
+} __packed;
+
+struct tb_drom_entry_port {
+       /* BYTES 0-1 */
+       struct tb_drom_entry_header header;
+       /* BYTE 2 */
+       u8 dual_link_port_rid:4;
+       u8 link_nr:1;
+       u8 unknown1:2;
+       bool has_dual_link_port:1;
+
+       /* BYTE 3 */
+       u8 dual_link_port_nr:6;
+       u8 unknown2:2;
+
+       /* BYTES 4 - 5 TODO decode */
+       u8 micro2:4;
+       u8 micro1:4;
+       u8 micro3;
+
+       /* BYTES 5-6, TODO: verify (find hardware that has these set) */
+       u8 peer_port_rid:4;
+       u8 unknown3:3;
+       bool has_peer_port:1;
+       u8 peer_port_nr:6;
+       u8 unknown4:2;
+} __packed;
+
+
+/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+       struct tb_cap_plug_events cap;
+       int res;
+       if (!sw->cap_plug_events) {
+               tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+               return -ENOSYS;
+       }
+       res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+                            sizeof(cap) / 4);
+       if (res)
+               return res;
+
+       if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+               tb_sw_warn(sw, "no NVM\n");
+               return -ENOSYS;
+       }
+
+       if (cap.drom_offset > 0xffff) {
+               tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+                               cap.drom_offset);
+               return -ENXIO;
+       }
+       *offset = cap.drom_offset;
+       return 0;
+}
+
+/**
+ * tb_drom_read_uid_only - read uid directly from drom
+ *
+ * Does not use the cached copy in sw->drom. Used during resume to check switch
+ * identity.
+ */
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
+{
+       u8 data[9];
+       u16 drom_offset;
+       u8 crc;
+       int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+       if (res)
+               return res;
+
+       /* read uid */
+       res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+       if (res)
+               return res;
+
+       crc = tb_crc8(data + 1, 8);
+       if (crc != data[0]) {
+               tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
+                               data[0], crc);
+               return -EIO;
+       }
+
+       *uid = *(u64 *)(data+1);
+       return 0;
+}
+
+static void tb_drom_parse_port_entry(struct tb_port *port,
+               struct tb_drom_entry_port *entry)
+{
+       port->link_nr = entry->link_nr;
+       if (entry->has_dual_link_port)
+               port->dual_link_port =
+                               &port->sw->ports[entry->dual_link_port_nr];
+}
+
+static int tb_drom_parse_entry(struct tb_switch *sw,
+               struct tb_drom_entry_header *header)
+{
+       struct tb_port *port;
+       int res;
+       enum tb_port_type type;
+
+       if (header->type != TB_DROM_ENTRY_PORT)
+               return 0;
+
+       port = &sw->ports[header->index];
+       port->disabled = header->port_disabled;
+       if (port->disabled)
+               return 0;
+
+       res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
+       if (res)
+               return res;
+       type &= 0xffffff;
+
+       if (type == TB_TYPE_PORT) {
+               struct tb_drom_entry_port *entry = (void *) header;
+               if (header->len != sizeof(*entry)) {
+                       tb_sw_warn(sw,
+                               "port entry has size %#x (expected %#zx)\n",
+                               header->len, sizeof(struct tb_drom_entry_port));
+                       return -EIO;
+               }
+               tb_drom_parse_port_entry(port, entry);
+       }
+       return 0;
+}
+
+/**
+ * tb_drom_parse_entries - parse the linked list of drom entries
+ *
+ * Drom must have been copied to sw->drom.
+ */
+static int tb_drom_parse_entries(struct tb_switch *sw)
+{
+       struct tb_drom_header *header = (void *) sw->drom;
+       u16 pos = sizeof(*header);
+       u16 drom_size = header->data_len + TB_DROM_DATA_START;
+
+       while (pos < drom_size) {
+               struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
+               if (pos + 1 == drom_size || pos + entry->len > drom_size
+                               || !entry->len) {
+                       tb_sw_warn(sw, "drom buffer overrun, aborting\n");
+                       return -EIO;
+               }
+
+               tb_drom_parse_entry(sw, entry);
+
+               pos += entry->len;
+       }
+       return 0;
+}
+
+/**
+ * tb_drom_read - copy drom to sw->drom and parse it
+ */
+int tb_drom_read(struct tb_switch *sw)
+{
+       u16 drom_offset;
+       u16 size;
+       u32 crc;
+       struct tb_drom_header *header;
+       int res;
+       if (sw->drom)
+               return 0;
+
+       if (tb_route(sw) == 0) {
+               /*
+                * The root switch contains only a dummy drom (header only,
+                * no entries). Hardcode the configuration here.
+                */
+               tb_drom_read_uid_only(sw, &sw->uid);
+
+               sw->ports[1].link_nr = 0;
+               sw->ports[2].link_nr = 1;
+               sw->ports[1].dual_link_port = &sw->ports[2];
+               sw->ports[2].dual_link_port = &sw->ports[1];
+
+               sw->ports[3].link_nr = 0;
+               sw->ports[4].link_nr = 1;
+               sw->ports[3].dual_link_port = &sw->ports[4];
+               sw->ports[4].dual_link_port = &sw->ports[3];
+               return 0;
+       }
+
+       res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+       if (res)
+               return res;
+
+       res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+       if (res)
+               return res;
+       size &= 0x3ff;
+       size += TB_DROM_DATA_START;
+       tb_sw_info(sw, "reading drom (length: %#x)\n", size);
+       if (size < sizeof(*header)) {
+               tb_sw_warn(sw, "drom too small, aborting\n");
+               return -EIO;
+       }
+
+       sw->drom = kzalloc(size, GFP_KERNEL);
+       if (!sw->drom)
+               return -ENOMEM;
+       res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+       if (res)
+               goto err;
+
+       header = (void *) sw->drom;
+
+       if (header->data_len + TB_DROM_DATA_START != size) {
+               tb_sw_warn(sw, "drom size mismatch, aborting\n");
+               goto err;
+       }
+
+       crc = tb_crc8((u8 *) &header->uid, 8);
+       if (crc != header->uid_crc8) {
+               tb_sw_warn(sw,
+                       "drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
+                       header->uid_crc8, crc);
+               goto err;
+       }
+       sw->uid = header->uid;
+
+       crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
+       if (crc != header->data_crc32) {
+               tb_sw_warn(sw,
+                       "drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
+                       header->data_crc32, crc);
+               goto err;
+       }
+
+       if (header->device_rom_revision > 1)
+               tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
+                       header->device_rom_revision);
+
+       return tb_drom_parse_entries(sw);
+err:
+       kfree(sw->drom);
+       sw->drom = NULL;
+       return -EIO;
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644 (file)
index 0000000..c68fe12
--- /dev/null
@@ -0,0 +1,675 @@
+/*
+ * Thunderbolt Cactus Ridge driver - NHI driver
+ *
+ * The NHI (native host interface) is the pci device that allows us to send and
+ * receive frames from the thunderbolt bus.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+
+
+static int ring_interrupt_index(struct tb_ring *ring)
+{
+       int bit = ring->hop;
+       if (!ring->is_tx)
+               bit += ring->nhi->hop_count;
+       return bit;
+}
+
+/**
+ * ring_interrupt_active() - activate/deactivate interrupts for a single ring
+ *
+ * ring->nhi->lock must be held.
+ */
+static void ring_interrupt_active(struct tb_ring *ring, bool active)
+{
+       int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
+       int bit = ring_interrupt_index(ring) & 31;
+       int mask = 1 << bit;
+       u32 old, new;
+       old = ioread32(ring->nhi->iobase + reg);
+       if (active)
+               new = old | mask;
+       else
+               new = old & ~mask;
+
+       dev_info(&ring->nhi->pdev->dev,
+                "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+                active ? "enabling" : "disabling", reg, bit, old, new);
+
+       if (new == old)
+               dev_WARN(&ring->nhi->pdev->dev,
+                                        "interrupt for %s %d is already %s\n",
+                                        RING_TYPE(ring), ring->hop,
+                                        active ? "enabled" : "disabled");
+       iowrite32(new, ring->nhi->iobase + reg);
+}
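
A quick worked example of the bit arithmetic above (illustration only): with the expected hop_count of 12, TX rings occupy bit indices 0-11 and RX rings 12-23, so TX ring 3 maps to bit 3 and RX ring 3 to bit 15, both within the first 32-bit register at REG_RING_INTERRUPT_BASE. A standalone restatement of ring_interrupt_index():

    /* Illustration only: flat interrupt-enable bit index for a ring. */
    static int ring_irq_bit(int hop, int hop_count, bool is_tx)
    {
            return is_tx ? hop : hop + hop_count; /* TX bits first, then RX bits */
    }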
+
+/**
+ * nhi_disable_interrupts() - disable interrupts for all rings
+ *
+ * Use only during init and shutdown.
+ */
+static void nhi_disable_interrupts(struct tb_nhi *nhi)
+{
+       int i = 0;
+       /* disable interrupts */
+       for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
+               iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+
+       /* clear interrupt status bits */
+       for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
+               ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+}
+
+/* ring helper methods */
+
+static void __iomem *ring_desc_base(struct tb_ring *ring)
+{
+       void __iomem *io = ring->nhi->iobase;
+       io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
+       io += ring->hop * 16;
+       return io;
+}
+
+static void __iomem *ring_options_base(struct tb_ring *ring)
+{
+       void __iomem *io = ring->nhi->iobase;
+       io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
+       io += ring->hop * 32;
+       return io;
+}
+
+static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+       iowrite16(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+       iowrite32(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
+{
+       iowrite32(value, ring_desc_base(ring) + offset);
+       iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
+}
+
+static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
+{
+       iowrite32(value, ring_options_base(ring) + offset);
+}
+
+static bool ring_full(struct tb_ring *ring)
+{
+       return ((ring->head + 1) % ring->size) == ring->tail;
+}
+
+static bool ring_empty(struct tb_ring *ring)
+{
+       return ring->head == ring->tail;
+}
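
head and tail implement the usual one-slot-free circular buffer: the ring is empty when head equals tail and full when advancing head would run into tail, so at most size - 1 descriptors can be outstanding at once. A small illustrative helper (not part of the patch) for the current occupancy:

    /* Illustration only: number of descriptors posted but not yet completed. */
    static int ring_occupancy(const struct tb_ring *ring)
    {
            return (ring->head - ring->tail + ring->size) % ring->size;
    }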
+
+/**
+ * ring_write_descriptors() - post frames from ring->queue to the controller
+ *
+ * ring->lock is held.
+ */
+static void ring_write_descriptors(struct tb_ring *ring)
+{
+       struct ring_frame *frame, *n;
+       struct ring_desc *descriptor;
+       list_for_each_entry_safe(frame, n, &ring->queue, list) {
+               if (ring_full(ring))
+                       break;
+               list_move_tail(&frame->list, &ring->in_flight);
+               descriptor = &ring->descriptors[ring->head];
+               descriptor->phys = frame->buffer_phy;
+               descriptor->time = 0;
+               descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
+               if (ring->is_tx) {
+                       descriptor->length = frame->size;
+                       descriptor->eof = frame->eof;
+                       descriptor->sof = frame->sof;
+               }
+               ring->head = (ring->head + 1) % ring->size;
+               ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+       }
+}
+
+/**
+ * ring_work() - progress completed frames
+ *
+ * If the ring is shutting down then all frames are marked as canceled and
+ * their callbacks are invoked.
+ *
+ * Otherwise we collect all completed frames from the ring buffer, write new
+ * frames to the ring buffer and invoke the callbacks for the completed frames.
+ */
+static void ring_work(struct work_struct *work)
+{
+       struct tb_ring *ring = container_of(work, typeof(*ring), work);
+       struct ring_frame *frame;
+       bool canceled = false;
+       LIST_HEAD(done);
+       mutex_lock(&ring->lock);
+
+       if (!ring->running) {
+               /*  Move all frames to done and mark them as canceled. */
+               list_splice_tail_init(&ring->in_flight, &done);
+               list_splice_tail_init(&ring->queue, &done);
+               canceled = true;
+               goto invoke_callback;
+       }
+
+       while (!ring_empty(ring)) {
+               if (!(ring->descriptors[ring->tail].flags
+                               & RING_DESC_COMPLETED))
+                       break;
+               frame = list_first_entry(&ring->in_flight, typeof(*frame),
+                                        list);
+               list_move_tail(&frame->list, &done);
+               if (!ring->is_tx) {
+                       frame->size = ring->descriptors[ring->tail].length;
+                       frame->eof = ring->descriptors[ring->tail].eof;
+                       frame->sof = ring->descriptors[ring->tail].sof;
+                       frame->flags = ring->descriptors[ring->tail].flags;
+                       if (frame->sof != 0)
+                               dev_WARN(&ring->nhi->pdev->dev,
+                                        "%s %d got unexpected SOF: %#x\n",
+                                        RING_TYPE(ring), ring->hop,
+                                        frame->sof);
+                       /*
+                        * known flags:
+                        * raw not enabled, interrupt not set: 0x2=0010
+                        * raw enabled: 0xa=1010
+                        * raw not enabled: 0xb=1011
+                        * partial frame (>MAX_FRAME_SIZE): 0xe=1110
+                        */
+                       if (frame->flags != 0xa)
+                               dev_WARN(&ring->nhi->pdev->dev,
+                                        "%s %d got unexpected flags: %#x\n",
+                                        RING_TYPE(ring), ring->hop,
+                                        frame->flags);
+               }
+               ring->tail = (ring->tail + 1) % ring->size;
+       }
+       ring_write_descriptors(ring);
+
+invoke_callback:
+       mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
+       while (!list_empty(&done)) {
+               frame = list_first_entry(&done, typeof(*frame), list);
+               /*
+                * The callback may reenqueue or delete frame.
+                * Do not hold on to it.
+                */
+               list_del_init(&frame->list);
+               frame->callback(ring, frame, canceled);
+       }
+}
+
+int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
+{
+       int ret = 0;
+       mutex_lock(&ring->lock);
+       if (ring->running) {
+               list_add_tail(&frame->list, &ring->queue);
+               ring_write_descriptors(ring);
+       } else {
+               ret = -ESHUTDOWN;
+       }
+       mutex_unlock(&ring->lock);
+       return ret;
+}
+
+static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+                                 bool transmit)
+{
+       struct tb_ring *ring = NULL;
+       dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+                transmit ? "TX" : "RX", hop, size);
+
+       mutex_lock(&nhi->lock);
+       if (hop >= nhi->hop_count) {
+               dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+               goto err;
+       }
+       if (transmit && nhi->tx_rings[hop]) {
+               dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
+               goto err;
+       } else if (!transmit && nhi->rx_rings[hop]) {
+               dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
+               goto err;
+       }
+       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring)
+               goto err;
+
+       mutex_init(&ring->lock);
+       INIT_LIST_HEAD(&ring->queue);
+       INIT_LIST_HEAD(&ring->in_flight);
+       INIT_WORK(&ring->work, ring_work);
+
+       ring->nhi = nhi;
+       ring->hop = hop;
+       ring->is_tx = transmit;
+       ring->size = size;
+       ring->head = 0;
+       ring->tail = 0;
+       ring->running = false;
+       ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
+                       size * sizeof(*ring->descriptors),
+                       &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
+       if (!ring->descriptors)
+               goto err;
+
+       if (transmit)
+               nhi->tx_rings[hop] = ring;
+       else
+               nhi->rx_rings[hop] = ring;
+       mutex_unlock(&nhi->lock);
+       return ring;
+
+err:
+       if (ring)
+               mutex_destroy(&ring->lock);
+       kfree(ring);
+       mutex_unlock(&nhi->lock);
+       return NULL;
+}
+
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
+{
+       return ring_alloc(nhi, hop, size, true);
+}
+
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
+{
+       return ring_alloc(nhi, hop, size, false);
+}
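
To show how the exported pieces fit together, here is a hedged sketch of a consumer posting a single receive buffer; my_rx, my_rx_callback and the hop/size values are invented for illustration, and error handling is kept minimal:

    /* Sketch only: hypothetical consumer of the ring API declared in nhi.h. */
    struct my_rx {
            struct ring_frame frame; /* embedded so the callback can recover it */
            void *buf;               /* CPU address of the DMA buffer */
    };

    static void my_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
    {
            struct my_rx *rx = container_of(frame, struct my_rx, frame);

            if (canceled)
                    return; /* ring_stop() ran; do not repost */
            /* frame->size/eof/sof/flags were filled in by ring_work() */
            ring_rx(ring, &rx->frame); /* repost the buffer */
    }

    static int my_start_rx(struct tb_nhi *nhi, struct my_rx *rx)
    {
            struct tb_ring *ring = ring_alloc_rx(nhi, 1 /* hop */, 8 /* descriptors */);

            if (!ring)
                    return -ENOMEM;
            rx->buf = dma_alloc_coherent(&nhi->pdev->dev, TB_FRAME_SIZE,
                                         &rx->frame.buffer_phy, GFP_KERNEL);
            if (!rx->buf) {
                    ring_free(ring);
                    return -ENOMEM;
            }
            rx->frame.callback = my_rx_callback;
            ring_start(ring);
            return ring_rx(ring, &rx->frame);
    }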
+
+/**
+ * ring_start() - enable a ring
+ *
+ * Must not be invoked in parallel with ring_stop().
+ */
+void ring_start(struct tb_ring *ring)
+{
+       mutex_lock(&ring->nhi->lock);
+       mutex_lock(&ring->lock);
+       if (ring->running) {
+               dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
+               goto err;
+       }
+       dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
+                RING_TYPE(ring), ring->hop);
+
+       ring_iowrite64desc(ring, ring->descriptors_dma, 0);
+       if (ring->is_tx) {
+               ring_iowrite32desc(ring, ring->size, 12);
+               ring_iowrite32options(ring, 0, 4); /* time related? */
+               ring_iowrite32options(ring,
+                                     RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+       } else {
+               ring_iowrite32desc(ring,
+                                  (TB_FRAME_SIZE << 16) | ring->size, 12);
+               ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
+               ring_iowrite32options(ring,
+                                     RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+       }
+       ring_interrupt_active(ring, true);
+       ring->running = true;
+err:
+       mutex_unlock(&ring->lock);
+       mutex_unlock(&ring->nhi->lock);
+}
+
+
+/**
+ * ring_stop() - shutdown a ring
+ *
+ * Must not be invoked from a callback.
+ *
+ * This method will disable the ring. Further calls to ring_tx/ring_rx will
+ * return -ESHUTDOWN until ring_start() has been called again.
+ *
+ * All enqueued frames will be canceled and their callbacks will be executed
+ * with frame->canceled set to true (on the callback thread). This method
+ * returns only after all callback invocations have finished.
+ */
+void ring_stop(struct tb_ring *ring)
+{
+       mutex_lock(&ring->nhi->lock);
+       mutex_lock(&ring->lock);
+       dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
+                RING_TYPE(ring), ring->hop);
+       if (!ring->running) {
+               dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
+                        RING_TYPE(ring), ring->hop);
+               goto err;
+       }
+       ring_interrupt_active(ring, false);
+
+       ring_iowrite32options(ring, 0, 0);
+       ring_iowrite64desc(ring, 0, 0);
+       ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+       ring_iowrite32desc(ring, 0, 12);
+       ring->head = 0;
+       ring->tail = 0;
+       ring->running = false;
+
+err:
+       mutex_unlock(&ring->lock);
+       mutex_unlock(&ring->nhi->lock);
+
+       /*
+        * schedule ring->work to invoke callbacks on all remaining frames.
+        */
+       schedule_work(&ring->work);
+       flush_work(&ring->work);
+}
+
+/*
+ * ring_free() - free ring
+ *
+ * When this method returns all invocations of ring->callback will have
+ * finished.
+ *
+ * Ring must be stopped.
+ *
+ * Must NOT be called from ring_frame->callback!
+ */
+void ring_free(struct tb_ring *ring)
+{
+       mutex_lock(&ring->nhi->lock);
+       /*
+        * Dissociate the ring from the NHI. This also ensures that
+        * nhi_interrupt_work cannot reschedule ring->work.
+        */
+       if (ring->is_tx)
+               ring->nhi->tx_rings[ring->hop] = NULL;
+       else
+               ring->nhi->rx_rings[ring->hop] = NULL;
+
+       if (ring->running) {
+               dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
+                        RING_TYPE(ring), ring->hop);
+       }
+
+       dma_free_coherent(&ring->nhi->pdev->dev,
+                         ring->size * sizeof(*ring->descriptors),
+                         ring->descriptors, ring->descriptors_dma);
+
+       ring->descriptors = NULL;
+       ring->descriptors_dma = 0;
+
+
+       dev_info(&ring->nhi->pdev->dev,
+                "freeing %s %d\n",
+                RING_TYPE(ring),
+                ring->hop);
+
+       mutex_unlock(&ring->nhi->lock);
+       /**
+        * ring->work can no longer be scheduled (it is scheduled only by
+        * nhi_interrupt_work and ring_stop). Wait for it to finish before
+        * freeing the ring.
+        */
+       flush_work(&ring->work);
+       mutex_destroy(&ring->lock);
+       kfree(ring);
+}
+
+static void nhi_interrupt_work(struct work_struct *work)
+{
+       struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
+       int value = 0; /* Suppress uninitialized usage warning. */
+       int bit;
+       int hop = -1;
+       int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
+       struct tb_ring *ring;
+
+       mutex_lock(&nhi->lock);
+
+       /*
+        * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
+        * (TX, RX, RX overflow). We iterate over the bits and read a new
+        * (TX, RX, RX overflow). We iterate over the bits and read new
+        * dwords as required. The registers are cleared on read.
+       for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
+               if (bit % 32 == 0)
+                       value = ioread32(nhi->iobase
+                                        + REG_RING_NOTIFY_BASE
+                                        + 4 * (bit / 32));
+               if (++hop == nhi->hop_count) {
+                       hop = 0;
+                       type++;
+               }
+               if ((value & (1 << (bit % 32))) == 0)
+                       continue;
+               if (type == 2) {
+                       dev_warn(&nhi->pdev->dev,
+                                "RX overflow for ring %d\n",
+                                hop);
+                       continue;
+               }
+               if (type == 0)
+                       ring = nhi->tx_rings[hop];
+               else
+                       ring = nhi->rx_rings[hop];
+               if (ring == NULL) {
+                       dev_warn(&nhi->pdev->dev,
+                                "got interrupt for inactive %s ring %d\n",
+                                type ? "RX" : "TX",
+                                hop);
+                       continue;
+               }
+               /* we do not check ring->running, this is done in ring->work */
+               schedule_work(&ring->work);
+       }
+       mutex_unlock(&nhi->lock);
+}
+
+static irqreturn_t nhi_msi(int irq, void *data)
+{
+       struct tb_nhi *nhi = data;
+       schedule_work(&nhi->interrupt_work);
+       return IRQ_HANDLED;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct tb *tb = pci_get_drvdata(pdev);
+       thunderbolt_suspend(tb);
+       return 0;
+}
+
+static int nhi_resume_noirq(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct tb *tb = pci_get_drvdata(pdev);
+       thunderbolt_resume(tb);
+       return 0;
+}
+
+static void nhi_shutdown(struct tb_nhi *nhi)
+{
+       int i;
+       dev_info(&nhi->pdev->dev, "shutdown\n");
+
+       for (i = 0; i < nhi->hop_count; i++) {
+               if (nhi->tx_rings[i])
+                       dev_WARN(&nhi->pdev->dev,
+                                "TX ring %d is still active\n", i);
+               if (nhi->rx_rings[i])
+                       dev_WARN(&nhi->pdev->dev,
+                                "RX ring %d is still active\n", i);
+       }
+       nhi_disable_interrupts(nhi);
+       /*
+        * We have to release the irq before calling flush_work. Otherwise an
+        * already executing IRQ handler could call schedule_work again.
+        */
+       devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+       flush_work(&nhi->interrupt_work);
+       mutex_destroy(&nhi->lock);
+}
+
+static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct tb_nhi *nhi;
+       struct tb *tb;
+       int res;
+
+       res = pcim_enable_device(pdev);
+       if (res) {
+               dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
+               return res;
+       }
+
+       res = pci_enable_msi(pdev);
+       if (res) {
+               dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
+               return res;
+       }
+
+       res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
+       if (res) {
+               dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+               return res;
+       }
+
+       nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
+       if (!nhi)
+               return -ENOMEM;
+
+       nhi->pdev = pdev;
+       /* cannot fail - table is allocated by pcim_iomap_regions */
+       nhi->iobase = pcim_iomap_table(pdev)[0];
+       nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+       if (nhi->hop_count != 12)
+               dev_warn(&pdev->dev, "unexpected hop count: %d\n",
+                        nhi->hop_count);
+       INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+       nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+                                    sizeof(*nhi->tx_rings), GFP_KERNEL);
+       nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+                                    sizeof(*nhi->rx_rings), GFP_KERNEL);
+       if (!nhi->tx_rings || !nhi->rx_rings)
+               return -ENOMEM;
+
+       nhi_disable_interrupts(nhi); /* In case someone left them on. */
+       res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
+                              IRQF_NO_SUSPEND, /* must work during _noirq */
+                              "thunderbolt", nhi);
+       if (res) {
+               dev_err(&pdev->dev, "request_irq failed, aborting\n");
+               return res;
+       }
+
+       mutex_init(&nhi->lock);
+
+       pci_set_master(pdev);
+
+       /* magic value - clock related? */
+       iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
+
+       dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+       tb = thunderbolt_alloc_and_start(nhi);
+       if (!tb) {
+               /*
+                * At this point the RX/TX rings might already have been
+                * activated. Do a proper shutdown.
+                */
+               nhi_shutdown(nhi);
+               return -EIO;
+       }
+       pci_set_drvdata(pdev, tb);
+
+       return 0;
+}
+
+static void nhi_remove(struct pci_dev *pdev)
+{
+       struct tb *tb = pci_get_drvdata(pdev);
+       struct tb_nhi *nhi = tb->nhi;
+       thunderbolt_shutdown_and_free(tb);
+       nhi_shutdown(nhi);
+}
+
+/*
+ * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
+ * the tunnels as early as possible. A corresponding pci quirk blocks the
+ * downstream bridges' resume_noirq until we are done.
+ */
+static const struct dev_pm_ops nhi_pm_ops = {
+       .suspend_noirq = nhi_suspend_noirq,
+       .resume_noirq = nhi_resume_noirq,
+       .freeze_noirq = nhi_suspend_noirq, /*
+                                           * we just disable hotplug, the
+                                           * pci-tunnels stay alive.
+                                           */
+       .restore_noirq = nhi_resume_noirq,
+};
+
+static struct pci_device_id nhi_ids[] = {
+       /*
+        * We have to specify class, the TB bridges use the same device and
+        * vendor (sub)id.
+        */
+       {
+               .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+               .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
+               .subvendor = 0x2222, .subdevice = 0x1111,
+       },
+       {
+               .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+               .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
+               .subvendor = 0x2222, .subdevice = 0x1111,
+       },
+       { 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_LICENSE("GPL");
+
+static struct pci_driver nhi_driver = {
+       .name = "thunderbolt",
+       .id_table = nhi_ids,
+       .probe = nhi_probe,
+       .remove = nhi_remove,
+       .driver.pm = &nhi_pm_ops,
+};
+
+static int __init nhi_init(void)
+{
+       if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+               return -ENOSYS;
+       return pci_register_driver(&nhi_driver);
+}
+
+static void __exit nhi_unload(void)
+{
+       pci_unregister_driver(&nhi_driver);
+}
+
+module_init(nhi_init);
+module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
new file mode 100644 (file)
index 0000000..3172429
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Thunderbolt Cactus Ridge driver - NHI driver
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef DSL3510_H_
+#define DSL3510_H_
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct tb_nhi - thunderbolt native host interface
+ */
+struct tb_nhi {
+       struct mutex lock; /*
+                           * Must be held during ring creation/destruction.
+                           * Is acquired by interrupt_work when dispatching
+                           * interrupts to individual rings.
+                           */
+       struct pci_dev *pdev;
+       void __iomem *iobase;
+       struct tb_ring **tx_rings;
+       struct tb_ring **rx_rings;
+       struct work_struct interrupt_work;
+       u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
+};
+
+/**
+ * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
+ */
+struct tb_ring {
+       struct mutex lock; /* must be acquired after nhi->lock */
+       struct tb_nhi *nhi;
+       int size;
+       int hop;
+       int head; /* write next descriptor here */
+       int tail; /* complete next descriptor here */
+       struct ring_desc *descriptors;
+       dma_addr_t descriptors_dma;
+       struct list_head queue;
+       struct list_head in_flight;
+       struct work_struct work;
+       bool is_tx:1; /* rx otherwise */
+       bool running:1;
+};
+
+struct ring_frame;
+typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
+
+/**
+ * struct ring_frame - for use with ring_rx/ring_tx
+ */
+struct ring_frame {
+       dma_addr_t buffer_phy;
+       ring_cb callback;
+       struct list_head list;
+       u32 size:12; /* TX: in, RX: out */
+       u32 flags:12; /* RX: out */
+       u32 eof:4; /* TX: in, RX: out */
+       u32 sof:4; /* TX: in, RX: out */
+};
+
+#define TB_FRAME_SIZE 0x100    /* minimum size for ring_rx */
+
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
+void ring_start(struct tb_ring *ring);
+void ring_stop(struct tb_ring *ring);
+void ring_free(struct tb_ring *ring);
+
+int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
+
+/**
+ * ring_rx() - enqueue a frame on an RX ring
+ *
+ * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
+ * buffer must contain at least TB_FRAME_SIZE bytes.
+ *
+ * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
+ * frame->sof set once the frame has been received.
+ *
+ * If ring_stop is called after the packet has been enqueued frame->callback
+ * will be called with canceled set to true.
+ *
+ * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
+ */
+static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
+{
+       WARN_ON(ring->is_tx);
+       return __ring_enqueue(ring, frame);
+}
+
+/**
+ * ring_tx() - enqueue a frame on a TX ring
+ *
+ * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
+ * and frame->sof have to be set.
+ *
+ * frame->callback will be invoked once the frame has been transmitted.
+ *
+ * If ring_stop is called after the packet has been enqueued frame->callback
+ * will be called with canceled set to true.
+ *
+ * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
+ */
+static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
+{
+       WARN_ON(!ring->is_tx);
+       return __ring_enqueue(ring, frame);
+}
+
+#endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
new file mode 100644 (file)
index 0000000..86b996c
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Thunderbolt Cactus Ridge driver - NHI registers
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef DSL3510_REGS_H_
+#define DSL3510_REGS_H_
+
+#include <linux/types.h>
+
+enum ring_flags {
+       RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
+       RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
+       RING_FLAG_PCI_NO_SNOOP = 1 << 29,
+       RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
+       RING_FLAG_ENABLE = 1 << 31,
+};
+
+enum ring_desc_flags {
+       RING_DESC_ISOCH = 0x1, /* TX only? */
+       RING_DESC_COMPLETED = 0x2, /* set by NHI */
+       RING_DESC_POSTED = 0x4, /* always set this */
+       RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
+};
+
+/**
+ * struct ring_desc - TX/RX ring entry
+ *
+ * For TX set length/eof/sof.
+ * For RX length/eof/sof are set by the NHI.
+ */
+struct ring_desc {
+       u64 phys;
+       u32 length:12;
+       u32 eof:4;
+       u32 sof:4;
+       enum ring_desc_flags flags:12;
+       u32 time; /* write zero */
+} __packed;
+
+/* NHI registers in bar 0 */
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring tail (set by NHI)
+ * 10: ring head (index of first non posted descriptor)
+ * 12: descriptor count
+ */
+#define REG_TX_RING_BASE       0x00000
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring head (index of first not posted descriptor)
+ * 10: ring tail (set by NHI)
+ * 12: descriptor count
+ * 14: max frame sizes (anything larger than 0x100 has no effect)
+ */
+#define REG_RX_RING_BASE       0x08000
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum_ring_flags
+ * 04: isoch time stamp ?? (write 0)
+ * ..: unknown
+ */
+#define REG_TX_OPTIONS_BASE    0x19800
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum ring_flags
+ *     If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
+ *     the corresponding TX hop id.
+ * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
+ * ..: unknown
+ */
+#define REG_RX_OPTIONS_BASE    0x29800
+
+/*
+ * three bitfields: tx, rx, rx overflow
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+ * cleared on read. New interrupts are fired only after ALL registers have been
+ * read (even those containing only disabled rings).
+ */
+#define REG_RING_NOTIFY_BASE   0x37800
+#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+
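
As a worked example of this layout (illustration only): with hop_count = 12 there are 3 * 12 = 36 status bits, so RING_NOTIFY_REG_COUNT is 2, and for hop 3 the TX bit is 3, the RX bit is 12 + 3 = 15 and the RX-overflow bit is 24 + 3 = 27 — the same flat index that nhi_interrupt_work() walks. A throwaway decoder:

    /* Illustration only: recover (type, hop) from a flat notify bit index. */
    static void notify_bit_decode(int bit, int hop_count, int *type, int *hop)
    {
            *type = bit / hop_count; /* 0: TX, 1: RX, 2: RX overflow */
            *hop = bit % hop_count;
    }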
+/*
+ * two bitfields: rx, tx
+ * Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
+ * enable/disable interrupts set/clear the corresponding bits.
+ */
+#define REG_RING_INTERRUPT_BASE        0x38200
+#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+
+/* The last 11 bits contain the number of hops supported by the NHI port. */
+#define REG_HOP_COUNT          0x39640
+
+#endif
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
new file mode 100644 (file)
index 0000000..8fcf8a7
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Thunderbolt Cactus Ridge driver - path/tunnel functionality
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include "tb.h"
+
+
+static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
+{
+       tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
+                    hop->out_port, hop->next_hop,
+                    hop->enable ? "enabled" : "disabled");
+       tb_port_info(port, "  Weight: %d Priority: %d Credits: %d Drop: %d\n",
+                    hop->weight, hop->priority,
+                    hop->initial_credits, hop->drop_packages);
+       tb_port_info(port, "   Counter enabled: %d Counter index: %d\n",
+                    hop->counter_enable, hop->counter);
+       tb_port_info(port, "  Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+                    hop->ingress_fc, hop->egress_fc,
+                    hop->ingress_shared_buffer, hop->egress_shared_buffer);
+       tb_port_info(port, "  Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+                    hop->unknown1, hop->unknown2, hop->unknown3);
+}
+
+/**
+ * tb_path_alloc() - allocate a thunderbolt path
+ *
+ * Return: Returns a tb_path on success or NULL on failure.
+ */
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
+{
+       struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
+       if (!path)
+               return NULL;
+       path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+       if (!path->hops) {
+               kfree(path);
+               return NULL;
+       }
+       path->tb = tb;
+       path->path_length = num_hops;
+       return path;
+}
+
+/**
+ * tb_path_free() - free a deactivated path
+ */
+void tb_path_free(struct tb_path *path)
+{
+       if (path->activated) {
+               tb_WARN(path->tb, "trying to free an activated path\n");
+               return;
+       }
+       kfree(path->hops);
+       kfree(path);
+}
+
+static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
+{
+       int i, res;
+       for (i = first_hop; i < path->path_length; i++) {
+               res = tb_port_add_nfc_credits(path->hops[i].in_port,
+                                             -path->nfc_credits);
+               if (res)
+                       tb_port_warn(path->hops[i].in_port,
+                                    "nfc credits deallocation failed for hop %d\n",
+                                    i);
+       }
+}
+
+static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
+{
+       int i, res;
+       struct tb_regs_hop hop = { };
+       for (i = first_hop; i < path->path_length; i++) {
+               res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+                                   2 * path->hops[i].in_hop_index, 2);
+               if (res)
+                       tb_port_warn(path->hops[i].in_port,
+                                    "hop deactivation failed for hop %d, index %d\n",
+                                    i, path->hops[i].in_hop_index);
+       }
+}
+
+void tb_path_deactivate(struct tb_path *path)
+{
+       if (!path->activated) {
+               tb_WARN(path->tb, "trying to deactivate an inactive path\n");
+               return;
+       }
+       tb_info(path->tb,
+               "deactivating path from %llx:%x to %llx:%x\n",
+               tb_route(path->hops[0].in_port->sw),
+               path->hops[0].in_port->port,
+               tb_route(path->hops[path->path_length - 1].out_port->sw),
+               path->hops[path->path_length - 1].out_port->port);
+       __tb_path_deactivate_hops(path, 0);
+       __tb_path_deallocate_nfc(path, 0);
+       path->activated = false;
+}
+
+/**
+ * tb_path_activate() - activate a path
+ *
+ * Activate a path starting with the last hop and iterating backwards. The
+ * caller must fill path->hops before calling tb_path_activate().
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_path_activate(struct tb_path *path)
+{
+       int i, res;
+       enum tb_path_port out_mask, in_mask;
+       if (path->activated) {
+               tb_WARN(path->tb, "trying to activate already activated path\n");
+               return -EINVAL;
+       }
+
+       tb_info(path->tb,
+               "activating path from %llx:%x to %llx:%x\n",
+               tb_route(path->hops[0].in_port->sw),
+               path->hops[0].in_port->port,
+               tb_route(path->hops[path->path_length - 1].out_port->sw),
+               path->hops[path->path_length - 1].out_port->port);
+
+       /* Clear counters. */
+       for (i = path->path_length - 1; i >= 0; i--) {
+               if (path->hops[i].in_counter_index == -1)
+                       continue;
+               res = tb_port_clear_counter(path->hops[i].in_port,
+                                           path->hops[i].in_counter_index);
+               if (res)
+                       goto err;
+       }
+
+       /* Add non flow controlled credits. */
+       for (i = path->path_length - 1; i >= 0; i--) {
+               res = tb_port_add_nfc_credits(path->hops[i].in_port,
+                                             path->nfc_credits);
+               if (res) {
+                       __tb_path_deallocate_nfc(path, i);
+                       goto err;
+               }
+       }
+
+       /* Activate hops. */
+       for (i = path->path_length - 1; i >= 0; i--) {
+               struct tb_regs_hop hop;
+
+               /* dword 0 */
+               hop.next_hop = path->hops[i].next_hop_index;
+               hop.out_port = path->hops[i].out_port->port;
+               /* TODO: figure out why these are good values */
+               hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
+               hop.unknown1 = 0;
+               hop.enable = 1;
+
+               /* dword 1 */
+               out_mask = (i == path->path_length - 1) ?
+                               TB_PATH_DESTINATION : TB_PATH_INTERNAL;
+               in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
+               hop.weight = path->weight;
+               hop.unknown2 = 0;
+               hop.priority = path->priority;
+               hop.drop_packages = path->drop_packages;
+               hop.counter = path->hops[i].in_counter_index;
+               hop.counter_enable = path->hops[i].in_counter_index != -1;
+               hop.ingress_fc = path->ingress_fc_enable & in_mask;
+               hop.egress_fc = path->egress_fc_enable & out_mask;
+               hop.ingress_shared_buffer = path->ingress_shared_buffer
+                                           & in_mask;
+               hop.egress_shared_buffer = path->egress_shared_buffer
+                                           & out_mask;
+               hop.unknown3 = 0;
+
+               tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d",
+                            i, path->hops[i].in_hop_index);
+               tb_dump_hop(path->hops[i].in_port, &hop);
+               res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+                                   2 * path->hops[i].in_hop_index, 2);
+               if (res) {
+                       __tb_path_deactivate_hops(path, i);
+                       __tb_path_deallocate_nfc(path, 0);
+                       goto err;
+               }
+       }
+       path->activated = true;
+       tb_info(path->tb, "path activation complete\n");
+       return 0;
+err:
+       tb_WARN(path->tb, "path activation failed\n");
+       return res;
+}
+
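For context, the caller fills in path->hops and the path-wide flow-control fields before invoking tb_path_activate(); the fragment below is only a hedged sketch of a single-hop setup, with every numeric value invented for illustration (real callers derive hop indices and credits from the tunnel being built):

    /* Sketch only: minimal single-hop path setup (all values illustrative). */
    static int demo_activate(struct tb *tb, struct tb_port *in, struct tb_port *out)
    {
            struct tb_path *path = tb_path_alloc(tb, 1);

            if (!path)
                    return -ENOMEM;
            path->hops[0].in_port = in;
            path->hops[0].in_hop_index = 8;      /* made-up hop id */
            path->hops[0].in_counter_index = -1; /* no counter */
            path->hops[0].out_port = out;
            path->hops[0].next_hop_index = 8;    /* made-up hop id on the far end */
            path->ingress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL
                                      | TB_PATH_DESTINATION;
            path->egress_fc_enable = path->ingress_fc_enable;
            path->priority = 5;                  /* made-up arbitration values */
            path->weight = 1;
            return tb_path_activate(path);
    }
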
+/**
+ * tb_path_is_invalid() - check whether any ports on the path are invalid
+ *
+ * Return: Returns true if the path is invalid, false otherwise.
+ */
+bool tb_path_is_invalid(struct tb_path *path)
+{
+       int i = 0;
+       for (i = 0; i < path->path_length; i++) {
+               if (path->hops[i].in_port->sw->is_unplugged)
+                       return true;
+               if (path->hops[i].out_port->sw->is_unplugged)
+                       return true;
+       }
+       return false;
+}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
new file mode 100644 (file)
index 0000000..aeb9829
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+ * Thunderbolt Cactus Ridge driver - switch/port utility functions
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "tb.h"
+
+/* port utility functions */
+
+static const char *tb_port_type(struct tb_regs_port_header *port)
+{
+       switch (port->type >> 16) {
+       case 0:
+               switch ((u8) port->type) {
+               case 0:
+                       return "Inactive";
+               case 1:
+                       return "Port";
+               case 2:
+                       return "NHI";
+               default:
+                       return "unknown";
+               }
+       case 0x2:
+               return "Ethernet";
+       case 0x8:
+               return "SATA";
+       case 0xe:
+               return "DP/HDMI";
+       case 0x10:
+               return "PCIe";
+       case 0x20:
+               return "USB";
+       default:
+               return "unknown";
+       }
+}
+
+static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
+{
+       tb_info(tb,
+               " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+               port->port_number, port->vendor_id, port->device_id,
+               port->revision, port->thunderbolt_version, tb_port_type(port),
+               port->type);
+       tb_info(tb, "  Max hop id (in/out): %d/%d\n",
+               port->max_in_hop_id, port->max_out_hop_id);
+       tb_info(tb, "  Max counters: %d\n", port->max_counters);
+       tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
+}
+
+/**
+ * tb_port_state() - get connectedness state of a port
+ *
+ * The port must have a TB_CAP_PHY (i.e. it should be a real port).
+ *
+ * Return: Returns an enum tb_port_state on success or an error code on failure.
+ */
+static int tb_port_state(struct tb_port *port)
+{
+       struct tb_cap_phy phy;
+       int res;
+       if (port->cap_phy == 0) {
+               tb_port_WARN(port, "does not have a PHY\n");
+               return -EINVAL;
+       }
+       res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
+       if (res)
+               return res;
+       return phy.state;
+}
+
+/**
+ * tb_wait_for_port() - wait for a port to become ready
+ *
+ * Wait up to 1 second for a port to reach state TB_PORT_UP. If
+ * wait_if_unplugged is set then we also wait if the port is in state
+ * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
+ * switch resume). Otherwise we only wait if a device is registered but the link
+ * has not yet been established.
+ *
+ * Return: Returns an error code on failure. Returns 0 if the port is not
+ * connected or failed to reach state TB_PORT_UP within one second. Returns 1
+ * if the port is connected and in state TB_PORT_UP.
+ */
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
+{
+       int retries = 10;
+       int state;
+       if (!port->cap_phy) {
+               tb_port_WARN(port, "does not have PHY\n");
+               return -EINVAL;
+       }
+       if (tb_is_upstream_port(port)) {
+               tb_port_WARN(port, "is the upstream port\n");
+               return -EINVAL;
+       }
+
+       while (retries--) {
+               state = tb_port_state(port);
+               if (state < 0)
+                       return state;
+               if (state == TB_PORT_DISABLED) {
+                       tb_port_info(port, "is disabled (state: 0)\n");
+                       return 0;
+               }
+               if (state == TB_PORT_UNPLUGGED) {
+                       if (wait_if_unplugged) {
+                               /* used during resume */
+                               tb_port_info(port,
+                                            "is unplugged (state: 7), retrying...\n");
+                               msleep(100);
+                               continue;
+                       }
+                       tb_port_info(port, "is unplugged (state: 7)\n");
+                       return 0;
+               }
+               if (state == TB_PORT_UP) {
+                       tb_port_info(port,
+                                    "is connected, link is up (state: 2)\n");
+                       return 1;
+               }
+
+               /*
+                * After plug-in the state is TB_PORT_CONNECTING. Give it some
+                * time.
+                */
+               tb_port_info(port,
+                            "is connected, link is not up (state: %d), retrying...\n",
+                            state);
+               msleep(100);
+       }
+       tb_port_warn(port,
+                    "failed to reach state TB_PORT_UP. Ignoring port...\n");
+       return 0;
+}
+
+/**
+ * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
+ *
+ * Change the number of NFC credits allocated to @port by @credits. To remove
+ * NFC credits pass a negative amount of credits.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_add_nfc_credits(struct tb_port *port, int credits)
+{
+       if (credits == 0)
+               return 0;
+       tb_port_info(port,
+                    "adding %#x NFC credits (%#x -> %#x)",
+                    credits,
+                    port->config.nfc_credits,
+                    port->config.nfc_credits + credits);
+       port->config.nfc_credits += credits;
+       return tb_port_write(port, &port->config.nfc_credits,
+                            TB_CFG_PORT, 4, 1);
+}
+
+/**
+ * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_clear_counter(struct tb_port *port, int counter)
+{
+       u32 zero[3] = { 0, 0, 0 };
+       tb_port_info(port, "clearing counter %d\n", counter);
+       return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
+}
+
+/**
+ * tb_init_port() - initialize a port
+ *
+ * This is a helper method for tb_switch_alloc. Does not check or initialize
+ * any downstream switches.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_init_port(struct tb_port *port)
+{
+       int res;
+       int cap;
+
+       res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
+       if (res)
+               return res;
+
+       /* Port 0 is the switch itself and has no PHY. */
+       if (port->config.type == TB_TYPE_PORT && port->port != 0) {
+               cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
+
+               if (cap > 0)
+                       port->cap_phy = cap;
+               else
+                       tb_port_WARN(port, "non switch port without a PHY\n");
+       }
+
+       tb_dump_port(port->sw->tb, &port->config);
+
+       /* TODO: Read dual link port, DP port and more from EEPROM. */
+       return 0;
+
+}
+
+/* switch utility functions */
+
+static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+{
+       tb_info(tb,
+               " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+               sw->vendor_id, sw->device_id, sw->revision,
+               sw->thunderbolt_version);
+       tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
+       tb_info(tb, "  Config:\n");
+       tb_info(tb,
+               "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
+               sw->upstream_port_number, sw->depth,
+               (((u64) sw->route_hi) << 32) | sw->route_lo,
+               sw->enabled, sw->plug_events_delay);
+       tb_info(tb,
+               "   unknown1: %#x unknown4: %#x\n",
+               sw->__unknown1, sw->__unknown4);
+}
+
+/**
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_reset(struct tb *tb, u64 route)
+{
+       struct tb_cfg_result res;
+       struct tb_regs_switch_header header = {
+               header.route_hi = route >> 32,
+               header.route_lo = route,
+               header.enabled = true,
+       };
+       tb_info(tb, "resetting switch at %llx\n", route);
+       res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
+                       0, 2, 2, 2);
+       if (res.err)
+               return res.err;
+       res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
+       if (res.err > 0)
+               return -EIO;
+       return res.err;
+}
+
+struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
+{
+       u8 next_port = route; /*
+                              * Routes use a stride of 8 bits,
+                              * even though a port index has 6 bits at most.
+                              */
+       if (route == 0)
+               return sw;
+       if (next_port > sw->config.max_port_number)
+               return NULL;
+       if (tb_is_upstream_port(&sw->ports[next_port]))
+               return NULL;
+       if (!sw->ports[next_port].remote)
+               return NULL;
+       return get_switch_at_route(sw->ports[next_port].remote->sw,
+                                  route >> TB_ROUTE_SHIFT);
+}
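
Route strings, per the comment above, use one byte per hop even though a port number needs at most 6 bits. As a worked example (illustrative only, assuming the 8-bit stride implied by TB_ROUTE_SHIFT): route 0x0103 names the switch reached by leaving the root switch through port 3 and then that switch through port 1, while route 0 refers to the switch the walk starts at. A throwaway decoder:

    /* Illustration only: print the per-hop exit ports encoded in a route. */
    static void dump_route(u64 route)
    {
            int depth = 0;

            while (route) {
                    pr_info("hop %d: exit port %d\n", depth++, (int)(route & 0xff));
                    route >>= 8;
            }
    }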
+
+/**
+ * tb_plug_events_active() - enable/disable plug events on a switch
+ *
+ * Also configures a sane plug_events_delay of 255ms.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_plug_events_active(struct tb_switch *sw, bool active)
+{
+       u32 data;
+       int res;
+
+       sw->config.plug_events_delay = 0xff;
+       res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
+       if (res)
+               return res;
+
+       res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
+       if (res)
+               return res;
+
+       if (active) {
+               data = data & 0xFFFFFF83;
+               switch (sw->config.device_id) {
+               case 0x1513:
+               case 0x151a:
+               case 0x1549:
+                       break;
+               default:
+                       data |= 4;
+               }
+       } else {
+               data = data | 0x7c;
+       }
+       return tb_sw_write(sw, &data, TB_CFG_SWITCH,
+                          sw->cap_plug_events + 1, 1);
+}
+
+
+/**
+ * tb_switch_free() - free a tb_switch and all downstream switches
+ */
+void tb_switch_free(struct tb_switch *sw)
+{
+       int i;
+       /* port 0 is the switch itself and never has a remote */
+       for (i = 1; i <= sw->config.max_port_number; i++) {
+               if (tb_is_upstream_port(&sw->ports[i]))
+                       continue;
+               if (sw->ports[i].remote)
+                       tb_switch_free(sw->ports[i].remote->sw);
+               sw->ports[i].remote = NULL;
+       }
+
+       if (!sw->is_unplugged)
+               tb_plug_events_active(sw, false);
+
+       kfree(sw->ports);
+       kfree(sw->drom);
+       kfree(sw);
+}
+
+/**
+ * tb_switch_alloc() - allocate and initialize a switch
+ *
+ * Return: Returns NULL on failure.
+ */
+struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
+{
+       int i;
+       int cap;
+       struct tb_switch *sw;
+       int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
+       if (upstream_port < 0)
+               return NULL;
+
+       sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+       if (!sw)
+               return NULL;
+
+       sw->tb = tb;
+       if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
+               goto err;
+       tb_info(tb,
+               "initializing Switch at %#llx (depth: %d, up port: %d)\n",
+               route, tb_route_length(route), upstream_port);
+       tb_info(tb, "old switch config:\n");
+       tb_dump_switch(tb, &sw->config);
+
+       /* configure switch */
+       sw->config.upstream_port_number = upstream_port;
+       sw->config.depth = tb_route_length(route);
+       sw->config.route_lo = route;
+       sw->config.route_hi = route >> 32;
+       sw->config.enabled = 1;
+       /* from here on we may use the tb_sw_* functions & macros */
+
+       if (sw->config.vendor_id != 0x8086)
+               tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+                          sw->config.vendor_id);
+
+       if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
+               tb_sw_warn(sw, "unsupported switch device id %#x\n",
+                          sw->config.device_id);
+
+       /* upload configuration */
+       if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
+               goto err;
+
+       /* initialize ports */
+       sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
+                               GFP_KERNEL);
+       if (!sw->ports)
+               goto err;
+
+       for (i = 0; i <= sw->config.max_port_number; i++) {
+               /* minimum setup for tb_find_cap and tb_drom_read to work */
+               sw->ports[i].sw = sw;
+               sw->ports[i].port = i;
+       }
+
+       cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
+       if (cap < 0) {
+               tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS aborting\n");
+               goto err;
+       }
+       sw->cap_plug_events = cap;
+
+       /* read drom */
+       if (tb_drom_read(sw))
+               tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n");
+       tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+
+       for (i = 0; i <= sw->config.max_port_number; i++) {
+               if (sw->ports[i].disabled) {
+                       tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+                       continue;
+               }
+               if (tb_init_port(&sw->ports[i]))
+                       goto err;
+       }
+
+       /* TODO: I2C, IECS, link controller */
+
+       if (tb_plug_events_active(sw, true))
+               goto err;
+
+       return sw;
+err:
+       kfree(sw->ports);
+       kfree(sw->drom);
+       kfree(sw);
+       return NULL;
+}
+
+/**
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
+ */
+void tb_sw_set_unplugged(struct tb_switch *sw)
+{
+       int i;
+       if (sw == sw->tb->root_switch) {
+               tb_sw_WARN(sw, "cannot unplug root switch\n");
+               return;
+       }
+       if (sw->is_unplugged) {
+               tb_sw_WARN(sw, "is_unplugged already set\n");
+               return;
+       }
+       sw->is_unplugged = true;
+       for (i = 0; i <= sw->config.max_port_number; i++) {
+               if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+                       tb_sw_set_unplugged(sw->ports[i].remote->sw);
+       }
+}
+
+int tb_switch_resume(struct tb_switch *sw)
+{
+       int i, err;
+       u64 uid;
+       tb_sw_info(sw, "resuming switch\n");
+
+       err = tb_drom_read_uid_only(sw, &uid);
+       if (err) {
+               tb_sw_warn(sw, "uid read failed\n");
+               return err;
+       }
+       if (sw->uid != uid) {
+               tb_sw_info(sw,
+                       "changed while suspended (uid %#llx -> %#llx)\n",
+                       sw->uid, uid);
+               return -ENODEV;
+       }
+
+       /* upload configuration */
+       err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
+       if (err)
+               return err;
+
+       err = tb_plug_events_active(sw, true);
+       if (err)
+               return err;
+
+       /* check for surviving downstream switches */
+       for (i = 1; i <= sw->config.max_port_number; i++) {
+               struct tb_port *port = &sw->ports[i];
+               if (tb_is_upstream_port(port))
+                       continue;
+               if (!port->remote)
+                       continue;
+               if (tb_wait_for_port(port, true) <= 0
+                       || tb_switch_resume(port->remote->sw)) {
+                       tb_port_warn(port,
+                                    "lost during suspend, disconnecting\n");
+                       tb_sw_set_unplugged(port->remote->sw);
+               }
+       }
+       return 0;
+}
+
+void tb_switch_suspend(struct tb_switch *sw)
+{
+       int i, err;
+       err = tb_plug_events_active(sw, false);
+       if (err)
+               return;
+
+       for (i = 1; i <= sw->config.max_port_number; i++) {
+               if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
+                       tb_switch_suspend(sw->ports[i].remote->sw);
+       }
+       /*
+        * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
+        * effect?
+        */
+}
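
The route strings used above by tb_switch_reset() and get_switch_at_route() (and built by tb_downstream_route() in tb.h) pack one downstream port number per hop, 8 bits per hop, least-significant hop first. A standalone userspace sketch, not part of the patch and with made-up names, that builds and walks such a route:

/*
 * Route string sketch: port 3 on the root switch (depth 0), then port 5 on
 * the switch one level down (depth 1) encodes as 0x503. Walking the route
 * consumes 8 bits per switch, just like get_switch_at_route().
 */
#include <stdio.h>
#include <stdint.h>

#define ROUTE_SHIFT 8 /* mirrors TB_ROUTE_SHIFT in tb_regs.h */

static uint64_t route_append(uint64_t route, int depth, uint8_t port)
{
	/* the hop taken at a given depth lives in bits [8*depth .. 8*depth+7] */
	return route | ((uint64_t) port << (depth * ROUTE_SHIFT));
}

int main(void)
{
	uint64_t route = route_append(route_append(0, 0, 3), 1, 5);

	printf("route = %#llx\n", (unsigned long long) route); /* 0x503 */

	while (route) {
		printf("take port %u\n", (unsigned) (route & 0xff));
		route >>= ROUTE_SHIFT;
	}
	return 0;
}
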
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
new file mode 100644 (file)
index 0000000..d2c3fe3
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include "tb.h"
+#include "tb_regs.h"
+#include "tunnel_pci.h"
+
+
+/* enumeration & hot plug handling */
+
+
+static void tb_scan_port(struct tb_port *port);
+
+/**
+ * tb_scan_switch() - scan for and initialize downstream switches
+ */
+static void tb_scan_switch(struct tb_switch *sw)
+{
+       int i;
+       for (i = 1; i <= sw->config.max_port_number; i++)
+               tb_scan_port(&sw->ports[i]);
+}
+
+/**
+ * tb_scan_port() - check for and initialize switches below port
+ */
+static void tb_scan_port(struct tb_port *port)
+{
+       struct tb_switch *sw;
+       if (tb_is_upstream_port(port))
+               return;
+       if (port->config.type != TB_TYPE_PORT)
+               return;
+       if (port->dual_link_port && port->link_nr)
+               return; /*
+                        * Downstream switch is reachable through two ports.
+                        * Only scan on the primary port (link_nr == 0).
+                        */
+       if (tb_wait_for_port(port, false) <= 0)
+               return;
+       if (port->remote) {
+               tb_port_WARN(port, "port already has a remote!\n");
+               return;
+       }
+       sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
+       if (!sw)
+               return;
+       port->remote = tb_upstream_port(sw);
+       tb_upstream_port(sw)->remote = port;
+       tb_scan_switch(sw);
+}
+
+/**
+ * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
+ */
+static void tb_free_invalid_tunnels(struct tb *tb)
+{
+       struct tb_pci_tunnel *tunnel;
+       struct tb_pci_tunnel *n;
+       list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+       {
+               if (tb_pci_is_invalid(tunnel)) {
+                       tb_pci_deactivate(tunnel);
+                       tb_pci_free(tunnel);
+               }
+       }
+}
+
+/**
+ * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
+ */
+static void tb_free_unplugged_children(struct tb_switch *sw)
+{
+       int i;
+       for (i = 1; i <= sw->config.max_port_number; i++) {
+               struct tb_port *port = &sw->ports[i];
+               if (tb_is_upstream_port(port))
+                       continue;
+               if (!port->remote)
+                       continue;
+               if (port->remote->sw->is_unplugged) {
+                       tb_switch_free(port->remote->sw);
+                       port->remote = NULL;
+               } else {
+                       tb_free_unplugged_children(port->remote->sw);
+               }
+       }
+}
+
+
+/**
+ * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
+ */
+static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+{
+       int i;
+       for (i = 1; i <= sw->config.max_port_number; i++)
+               if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+                       return &sw->ports[i];
+       return NULL;
+}
+
+/**
+ * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
+ */
+static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
+{
+       int i;
+       int cap;
+       int res;
+       int data;
+       for (i = 1; i <= sw->config.max_port_number; i++) {
+               if (tb_is_upstream_port(&sw->ports[i]))
+                       continue;
+               if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
+                       continue;
+               cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
+               if (cap <= 0)
+                       continue;
+               res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
+               if (res < 0)
+                       continue;
+               if (data & 0x80000000)
+                       continue;
+               return &sw->ports[i];
+       }
+       return NULL;
+}
+
+/**
+ * tb_activate_pcie_devices() - scan for and activate PCIe devices
+ *
+ * This method is somewhat ad hoc. For now it only supports one device
+ * per port and only devices at depth 1.
+ */
+static void tb_activate_pcie_devices(struct tb *tb)
+{
+       int i;
+       int cap;
+       u32 data;
+       struct tb_switch *sw;
+       struct tb_port *up_port;
+       struct tb_port *down_port;
+       struct tb_pci_tunnel *tunnel;
+       /* scan for pcie devices at depth 1 */
+       for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
+               if (tb_is_upstream_port(&tb->root_switch->ports[i]))
+                       continue;
+               if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
+                       continue;
+               if (!tb->root_switch->ports[i].remote)
+                       continue;
+               sw = tb->root_switch->ports[i].remote->sw;
+               up_port = tb_find_pci_up_port(sw);
+               if (!up_port) {
+                       tb_sw_info(sw, "no PCIe devices found, aborting\n");
+                       continue;
+               }
+
+               /* check whether port is already activated */
+               cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
+               if (cap <= 0)
+                       continue;
+               if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
+                       continue;
+               if (data & 0x80000000) {
+                       tb_port_info(up_port,
+                                    "PCIe port already activated, aborting\n");
+                       continue;
+               }
+
+               down_port = tb_find_unused_down_port(tb->root_switch);
+               if (!down_port) {
+                       tb_port_info(up_port,
+                                    "All PCIe down ports are occupied, aborting\n");
+                       continue;
+               }
+               tunnel = tb_pci_alloc(tb, up_port, down_port);
+               if (!tunnel) {
+                       tb_port_info(up_port,
+                                    "PCIe tunnel allocation failed, aborting\n");
+                       continue;
+               }
+
+               if (tb_pci_activate(tunnel)) {
+                       tb_port_info(up_port,
+                                    "PCIe tunnel activation failed, aborting\n");
+                       tb_pci_free(tunnel);
+               }
+
+       }
+}
+
+/* hotplug handling */
+
+struct tb_hotplug_event {
+       struct work_struct work;
+       struct tb *tb;
+       u64 route;
+       u8 port;
+       bool unplug;
+};
+
+/**
+ * tb_handle_hotplug() - handle hotplug event
+ *
+ * Executes on tb->wq.
+ */
+static void tb_handle_hotplug(struct work_struct *work)
+{
+       struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+       struct tb *tb = ev->tb;
+       struct tb_switch *sw;
+       struct tb_port *port;
+       mutex_lock(&tb->lock);
+       if (!tb->hotplug_active)
+               goto out; /* during init, suspend or shutdown */
+
+       sw = get_switch_at_route(tb->root_switch, ev->route);
+       if (!sw) {
+               tb_warn(tb,
+                       "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
+                       ev->route, ev->port, ev->unplug);
+               goto out;
+       }
+       if (ev->port > sw->config.max_port_number) {
+               tb_warn(tb,
+                       "hotplug event from non existent port %llx:%x (unplug: %d)\n",
+                       ev->route, ev->port, ev->unplug);
+               goto out;
+       }
+       port = &sw->ports[ev->port];
+       if (tb_is_upstream_port(port)) {
+               tb_warn(tb,
+                       "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+                       ev->route, ev->port, ev->unplug);
+               goto out;
+       }
+       if (ev->unplug) {
+               if (port->remote) {
+                       tb_port_info(port, "unplugged\n");
+                       tb_sw_set_unplugged(port->remote->sw);
+                       tb_free_invalid_tunnels(tb);
+                       tb_switch_free(port->remote->sw);
+                       port->remote = NULL;
+               } else {
+                       tb_port_info(port,
+                                    "got unplug event for disconnected port, ignoring\n");
+               }
+       } else if (port->remote) {
+               tb_port_info(port,
+                            "got plug event for connected port, ignoring\n");
+       } else {
+               tb_port_info(port, "hotplug: scanning\n");
+               tb_scan_port(port);
+               if (!port->remote) {
+                       tb_port_info(port, "hotplug: no switch found\n");
+               } else if (port->remote->sw->config.depth > 1) {
+                       tb_sw_warn(port->remote->sw,
+                                  "hotplug: chaining not supported\n");
+               } else {
+                       tb_sw_info(port->remote->sw,
+                                  "hotplug: activating pcie devices\n");
+                       tb_activate_pcie_devices(tb);
+               }
+       }
+out:
+       mutex_unlock(&tb->lock);
+       kfree(ev);
+}
+
+/**
+ * tb_schedule_hotplug_handler() - callback function for the control channel
+ *
+ * Delegates to tb_handle_hotplug.
+ */
+static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
+                                       bool unplug)
+{
+       struct tb *tb = data;
+       struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+       if (!ev)
+               return;
+       INIT_WORK(&ev->work, tb_handle_hotplug);
+       ev->tb = tb;
+       ev->route = route;
+       ev->port = port;
+       ev->unplug = unplug;
+       queue_work(tb->wq, &ev->work);
+}
+
+/**
+ * thunderbolt_shutdown_and_free() - shutdown everything
+ *
+ * Free all switches and the config channel.
+ *
+ * Used in the error path of thunderbolt_alloc_and_start.
+ */
+void thunderbolt_shutdown_and_free(struct tb *tb)
+{
+       struct tb_pci_tunnel *tunnel;
+       struct tb_pci_tunnel *n;
+
+       mutex_lock(&tb->lock);
+
+       /* tunnels are only present after everything has been initialized */
+       list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+               tb_pci_deactivate(tunnel);
+               tb_pci_free(tunnel);
+       }
+
+       if (tb->root_switch)
+               tb_switch_free(tb->root_switch);
+       tb->root_switch = NULL;
+
+       if (tb->ctl) {
+               tb_ctl_stop(tb->ctl);
+               tb_ctl_free(tb->ctl);
+       }
+       tb->ctl = NULL;
+       tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+
+       /* allow tb_handle_hotplug to acquire the lock */
+       mutex_unlock(&tb->lock);
+       if (tb->wq) {
+               flush_workqueue(tb->wq);
+               destroy_workqueue(tb->wq);
+               tb->wq = NULL;
+       }
+       mutex_destroy(&tb->lock);
+       kfree(tb);
+}
+
+/**
+ * thunderbolt_alloc_and_start() - setup the thunderbolt bus
+ *
+ * Allocates a tb_cfg control channel, initializes the root switch, enables
+ * plug events and activates pci devices.
+ *
+ * Return: Returns NULL on error.
+ */
+struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+{
+       struct tb *tb;
+
+       BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+       BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+       BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+       tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+       if (!tb)
+               return NULL;
+
+       tb->nhi = nhi;
+       mutex_init(&tb->lock);
+       mutex_lock(&tb->lock);
+       INIT_LIST_HEAD(&tb->tunnel_list);
+
+       tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
+       if (!tb->wq)
+               goto err_locked;
+
+       tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
+       if (!tb->ctl)
+               goto err_locked;
+       /*
+        * tb_schedule_hotplug_handler may be called as soon as the config
+        * channel is started. That's why we have to hold the lock here.
+        */
+       tb_ctl_start(tb->ctl);
+
+       tb->root_switch = tb_switch_alloc(tb, 0);
+       if (!tb->root_switch)
+               goto err_locked;
+
+       /* Full scan to discover devices added before the driver was loaded. */
+       tb_scan_switch(tb->root_switch);
+       tb_activate_pcie_devices(tb);
+
+       /* Allow tb_handle_hotplug to progress events */
+       tb->hotplug_active = true;
+       mutex_unlock(&tb->lock);
+       return tb;
+
+err_locked:
+       mutex_unlock(&tb->lock);
+       thunderbolt_shutdown_and_free(tb);
+       return NULL;
+}
+
+void thunderbolt_suspend(struct tb *tb)
+{
+       tb_info(tb, "suspending...\n");
+       mutex_lock(&tb->lock);
+       tb_switch_suspend(tb->root_switch);
+       tb_ctl_stop(tb->ctl);
+       tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+       mutex_unlock(&tb->lock);
+       tb_info(tb, "suspend finished\n");
+}
+
+void thunderbolt_resume(struct tb *tb)
+{
+       struct tb_pci_tunnel *tunnel, *n;
+       tb_info(tb, "resuming...\n");
+       mutex_lock(&tb->lock);
+       tb_ctl_start(tb->ctl);
+
+       /* remove any pci devices the firmware might have set up */
+       tb_switch_reset(tb, 0);
+
+       tb_switch_resume(tb->root_switch);
+       tb_free_invalid_tunnels(tb);
+       tb_free_unplugged_children(tb->root_switch);
+       list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+               tb_pci_restart(tunnel);
+       if (!list_empty(&tb->tunnel_list)) {
+               /*
+                * the pcie links need some time to get going.
+                * 100ms works for me...
+                */
+               tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
+               msleep(100);
+       }
+       /* Allow tb_handle_hotplug to progress events */
+       tb->hotplug_active = true;
+       mutex_unlock(&tb->lock);
+       tb_info(tb, "resume finished\n");
+}
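
Shutdown and suspend above rely on a drain pattern: tb_handle_hotplug() re-checks tb->hotplug_active under tb->lock, so the caller can clear the flag, drop the lock and then flush tb->wq, letting already-queued events run and bail out. A minimal module sketch of that pattern, using made-up demo_* names rather than code from this patch:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(demo_lock);
static bool demo_active;
static struct workqueue_struct *demo_wq;

struct demo_event {
	struct work_struct work;
	int id;
};

static void demo_handle(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	mutex_lock(&demo_lock);
	if (!demo_active)
		goto out; /* shutting down, drop the event */
	pr_info("handling event %d\n", ev->id);
out:
	mutex_unlock(&demo_lock);
	kfree(ev);
}

static void demo_post(int id)
{
	struct demo_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return;
	INIT_WORK(&ev->work, demo_handle);
	ev->id = id;
	queue_work(demo_wq, &ev->work);
}

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo", 0);
	if (!demo_wq)
		return -ENOMEM;
	demo_active = true;
	demo_post(1);
	return 0;
}

static void __exit demo_exit(void)
{
	mutex_lock(&demo_lock);
	demo_active = false;      /* queued handlers will now bail out */
	mutex_unlock(&demo_lock);
	flush_workqueue(demo_wq); /* wait for them to do so */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
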
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
new file mode 100644 (file)
index 0000000..8b0d7cf
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_H_
+#define TB_H_
+
+#include <linux/pci.h>
+
+#include "tb_regs.h"
+#include "ctl.h"
+
+/**
+ * struct tb_switch - a thunderbolt switch
+ */
+struct tb_switch {
+       struct tb_regs_switch_header config;
+       struct tb_port *ports;
+       struct tb *tb;
+       u64 uid;
+       int cap_plug_events; /* offset, zero if not found */
+       bool is_unplugged; /* unplugged, will go away */
+       u8 *drom;
+};
+
+/**
+ * struct tb_port - a thunderbolt port, part of a tb_switch
+ */
+struct tb_port {
+       struct tb_regs_port_header config;
+       struct tb_switch *sw;
+       struct tb_port *remote; /* remote port, NULL if not connected */
+       int cap_phy; /* offset, zero if not found */
+       u8 port; /* port number on switch */
+       bool disabled; /* disabled by eeprom */
+       struct tb_port *dual_link_port;
+       u8 link_nr:1;
+};
+
+/**
+ * struct tb_path_hop - routing information for a tb_path
+ *
+ * Hop configuration is always done on the IN port of a switch.
+ * in_port and out_port have to be on the same switch. Packets arriving on
+ * in_port with "hop" = in_hop_index will get routed through out_port. The
+ * next hop to take (on out_port->remote) is determined by next_hop_index.
+ *
+ * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
+ * port.
+ */
+struct tb_path_hop {
+       struct tb_port *in_port;
+       struct tb_port *out_port;
+       int in_hop_index;
+       int in_counter_index; /* write -1 to disable counters for this hop. */
+       int next_hop_index;
+};
+
+/**
+ * enum tb_path_port - path options mask
+ */
+enum tb_path_port {
+       TB_PATH_NONE = 0,
+       TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
+       TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
+       TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
+       TB_PATH_ALL = 7,
+};
+
+/**
+ * struct tb_path - a unidirectional path between two ports
+ *
+ * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
+ * tunnel, two paths have to be created between the two PCIe ports.
+ */
+struct tb_path {
+       struct tb *tb;
+       int nfc_credits; /* non flow controlled credits */
+       enum tb_path_port ingress_shared_buffer;
+       enum tb_path_port egress_shared_buffer;
+       enum tb_path_port ingress_fc_enable;
+       enum tb_path_port egress_fc_enable;
+
+       int priority:3;
+       int weight:4;
+       bool drop_packages;
+       bool activated;
+       struct tb_path_hop *hops;
+       int path_length; /* number of hops */
+};
+
+
+/**
+ * struct tb - main thunderbolt bus structure
+ */
+struct tb {
+       struct mutex lock;      /*
+                                * Big lock. Must be held when accessing cfg or
+                                * any struct tb_switch / struct tb_port.
+                                */
+       struct tb_nhi *nhi;
+       struct tb_ctl *ctl;
+       struct workqueue_struct *wq; /* ordered workqueue for plug events */
+       struct tb_switch *root_switch;
+       struct list_head tunnel_list; /* list of active PCIe tunnels */
+       bool hotplug_active; /*
+                             * tb_handle_hotplug will stop progressing plug
+                             * events and exit if this is not set (it needs to
+                             * acquire the lock one more time). Used to drain
+                             * wq after cfg has been paused.
+                             */
+
+};
+
+/* helper functions & macros */
+
+/**
+ * tb_upstream_port() - return the upstream port of a switch
+ *
+ * Every switch has an upstream port (for the root switch it is the NHI).
+ *
+ * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
+ * non root switches (on the NHI port remote is always NULL).
+ *
+ * Return: Returns the upstream port of the switch.
+ */
+static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
+{
+       return &sw->ports[sw->config.upstream_port_number];
+}
+
+static inline u64 tb_route(struct tb_switch *sw)
+{
+       return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
+}
+
+static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
+                            enum tb_cfg_space space, u32 offset, u32 length)
+{
+       return tb_cfg_read(sw->tb->ctl,
+                          buffer,
+                          tb_route(sw),
+                          0,
+                          space,
+                          offset,
+                          length);
+}
+
+static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+                             enum tb_cfg_space space, u32 offset, u32 length)
+{
+       return tb_cfg_write(sw->tb->ctl,
+                           buffer,
+                           tb_route(sw),
+                           0,
+                           space,
+                           offset,
+                           length);
+}
+
+static inline int tb_port_read(struct tb_port *port, void *buffer,
+                              enum tb_cfg_space space, u32 offset, u32 length)
+{
+       return tb_cfg_read(port->sw->tb->ctl,
+                          buffer,
+                          tb_route(port->sw),
+                          port->port,
+                          space,
+                          offset,
+                          length);
+}
+
+static inline int tb_port_write(struct tb_port *port, void *buffer,
+                               enum tb_cfg_space space, u32 offset, u32 length)
+{
+       return tb_cfg_write(port->sw->tb->ctl,
+                           buffer,
+                           tb_route(port->sw),
+                           port->port,
+                           space,
+                           offset,
+                           length);
+}
+
+#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
+
+
+#define __TB_SW_PRINT(level, sw, fmt, arg...)           \
+       do {                                            \
+               struct tb_switch *__sw = (sw);          \
+               level(__sw->tb, "%llx: " fmt,           \
+                     tb_route(__sw), ## arg);          \
+       } while (0)
+#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
+#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
+#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
+
+
+#define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
+       do {                                                            \
+               struct tb_port *__port = (_port);                       \
+               level(__port->sw->tb, "%llx:%x: " fmt,                  \
+                     tb_route(__port->sw), __port->port, ## arg);      \
+       } while (0)
+#define tb_port_WARN(port, fmt, arg...) \
+       __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
+#define tb_port_warn(port, fmt, arg...) \
+       __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
+#define tb_port_info(port, fmt, arg...) \
+       __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+
+
+struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
+void thunderbolt_shutdown_and_free(struct tb *tb);
+void thunderbolt_suspend(struct tb *tb);
+void thunderbolt_resume(struct tb *tb);
+
+struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
+void tb_switch_free(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_reset(struct tb *tb, u64 route);
+void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
+
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
+int tb_port_add_nfc_credits(struct tb_port *port, int credits);
+int tb_port_clear_counter(struct tb_port *port, int counter);
+
+int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap);
+
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
+void tb_path_free(struct tb_path *path);
+int tb_path_activate(struct tb_path *path);
+void tb_path_deactivate(struct tb_path *path);
+bool tb_path_is_invalid(struct tb_path *path);
+
+int tb_drom_read(struct tb_switch *sw);
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
+
+
+static inline int tb_route_length(u64 route)
+{
+       return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
+}
+
+static inline bool tb_is_upstream_port(struct tb_port *port)
+{
+       return port == tb_upstream_port(port->sw);
+}
+
+/**
+ * tb_downstream_route() - get route to downstream switch
+ *
+ * Port must not be the upstream port (otherwise a loop is created).
+ *
+ * Return: Returns a route to the switch behind @port.
+ */
+static inline u64 tb_downstream_route(struct tb_port *port)
+{
+       return tb_route(port->sw)
+              | ((u64) port->port << (port->sw->config.depth * 8));
+}
+
+#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
new file mode 100644 (file)
index 0000000..6577af7
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
+ *
+ * Every thunderbolt device consists (logically) of a switch with multiple
+ * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
+ * COUNTERS) which are used to configure the device.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_REGS
+#define _TB_REGS
+
+#include <linux/types.h>
+
+
+#define TB_ROUTE_SHIFT 8  /* number of bits in a port entry of a route */
+
+
+/*
+ * TODO: should be 63? But we do not know how to receive frames larger than 256
+ * bytes at the frame level. (header + checksum = 16, 60*4 = 240)
+ */
+#define TB_MAX_CONFIG_RW_LENGTH 60
+
+enum tb_cap {
+       TB_CAP_PHY              = 0x0001,
+       TB_CAP_TIME1            = 0x0003,
+       TB_CAP_PCIE             = 0x0004,
+       TB_CAP_I2C              = 0x0005,
+       TB_CAP_PLUG_EVENTS      = 0x0105, /* also EEPROM */
+       TB_CAP_TIME2            = 0x0305,
+       TB_CAL_IECS             = 0x0405,
+       TB_CAP_LINK_CONTROLLER  = 0x0605, /* also IECS */
+};
+
+enum tb_port_state {
+       TB_PORT_DISABLED        = 0, /* tb_cap_phy.disable == 1 */
+       TB_PORT_CONNECTING      = 1, /* retry */
+       TB_PORT_UP              = 2,
+       TB_PORT_UNPLUGGED       = 7,
+};
+
+/* capability headers */
+
+struct tb_cap_basic {
+       u8 next;
+       /* enum tb_cap cap:8; prevent "narrower than values of its type" */
+       u8 cap; /* if cap == 0x05 then we have an extended capability */
+} __packed;
+
+struct tb_cap_extended_short {
+       u8 next; /* if next and length are zero then we have a long cap */
+       enum tb_cap cap:16;
+       u8 length;
+} __packed;
+
+struct tb_cap_extended_long {
+       u8 zero1;
+       enum tb_cap cap:16;
+       u8 zero2;
+       u16 next;
+       u16 length;
+} __packed;
+
+/* capabilities */
+
+struct tb_cap_link_controller {
+       struct tb_cap_extended_long cap_header;
+       u32 count:4; /* number of link controllers */
+       u32 unknown1:4;
+       u32 base_offset:8; /*
+                           * offset (into this capability) of the configuration
+                           * area of the first link controller
+                           */
+       u32 length:12; /* link controller configuration area length */
+       u32 unknown2:4; /* TODO check that length is correct */
+} __packed;
+
+struct tb_cap_phy {
+       struct tb_cap_basic cap_header;
+       u32 unknown1:16;
+       u32 unknown2:14;
+       bool disable:1;
+       u32 unknown3:11;
+       enum tb_port_state state:4;
+       u32 unknown4:2;
+} __packed;
+
+struct tb_eeprom_ctl {
+       bool clock:1; /* send pulse to transfer one bit */
+       bool access_low:1; /* set to 0 before access */
+       bool data_out:1; /* to eeprom */
+       bool data_in:1; /* from eeprom */
+       bool access_high:1; /* set to 1 before access */
+       bool not_present:1; /* should be 0 */
+       bool unknown1:1;
+       bool present:1; /* should be 1 */
+       u32 unknown2:24;
+} __packed;
+
+struct tb_cap_plug_events {
+       struct tb_cap_extended_short cap_header;
+       u32 __unknown1:2;
+       u32 plug_events:5;
+       u32 __unknown2:25;
+       u32 __unknown3;
+       u32 __unknown4;
+       struct tb_eeprom_ctl eeprom_ctl;
+       u32 __unknown5[7];
+       u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */
+} __packed;
+
+/* device headers */
+
+/* Present on port 0 in TB_CFG_SWITCH at address zero. */
+struct tb_regs_switch_header {
+       /* DWORD 0 */
+       u16 vendor_id;
+       u16 device_id;
+       /* DWORD 1 */
+       u32 first_cap_offset:8;
+       u32 upstream_port_number:6;
+       u32 max_port_number:6;
+       u32 depth:3;
+       u32 __unknown1:1;
+       u32 revision:8;
+       /* DWORD 2 */
+       u32 route_lo;
+       /* DWORD 3 */
+       u32 route_hi:31;
+       bool enabled:1;
+       /* DWORD 4 */
+       u32 plug_events_delay:8; /*
+                                 * RW, pause between plug events in
+                                 * milliseconds. Writing 0x00 is interpreted
+                                 * as 255ms.
+                                 */
+       u32 __unknown4:16;
+       u32 thunderbolt_version:8;
+} __packed;
+
+enum tb_port_type {
+       TB_TYPE_INACTIVE        = 0x000000,
+       TB_TYPE_PORT            = 0x000001,
+       TB_TYPE_NHI             = 0x000002,
+       /* TB_TYPE_ETHERNET     = 0x020000, lower order bits are not known */
+       /* TB_TYPE_SATA         = 0x080000, lower order bits are not known */
+       TB_TYPE_DP_HDMI_IN      = 0x0e0101,
+       TB_TYPE_DP_HDMI_OUT     = 0x0e0102,
+       TB_TYPE_PCIE_DOWN       = 0x100101,
+       TB_TYPE_PCIE_UP         = 0x100102,
+       /* TB_TYPE_USB          = 0x200000, lower order bits are not known */
+};
+
+/* Present on every port in TB_CFG_PORT at address zero. */
+struct tb_regs_port_header {
+       /* DWORD 0 */
+       u16 vendor_id;
+       u16 device_id;
+       /* DWORD 1 */
+       u32 first_cap_offset:8;
+       u32 max_counters:11;
+       u32 __unknown1:5;
+       u32 revision:8;
+       /* DWORD 2 */
+       enum tb_port_type type:24;
+       u32 thunderbolt_version:8;
+       /* DWORD 3 */
+       u32 __unknown2:20;
+       u32 port_number:6;
+       u32 __unknown3:6;
+       /* DWORD 4 */
+       u32 nfc_credits;
+       /* DWORD 5 */
+       u32 max_in_hop_id:11;
+       u32 max_out_hop_id:11;
+       u32 __unknown4:10;
+       /* DWORD 6 */
+       u32 __unknown5;
+       /* DWORD 7 */
+       u32 __unknown6;
+
+} __packed;
+
+/* Hop register from TB_CFG_HOPS. 8 bytes per entry. */
+struct tb_regs_hop {
+       /* DWORD 0 */
+       u32 next_hop:11; /*
+                         * hop to take after sending the packet through
+                         * out_port (on the incoming port of the next switch)
+                         */
+       u32 out_port:6; /* next port of the path (on the same switch) */
+       u32 initial_credits:8;
+       u32 unknown1:6; /* set to zero */
+       bool enable:1;
+
+       /* DWORD 1 */
+       u32 weight:4;
+       u32 unknown2:4; /* set to zero */
+       u32 priority:3;
+       bool drop_packages:1;
+       u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
+       bool counter_enable:1;
+       bool ingress_fc:1;
+       bool egress_fc:1;
+       bool ingress_shared_buffer:1;
+       bool egress_shared_buffer:1;
+       u32 unknown3:4; /* set to zero */
+} __packed;
+
+
+#endif
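
The config spaces these structs describe are read and written one 32-bit dword at a time; that is why thunderbolt_alloc_and_start() checks the struct sizes with BUILD_BUG_ON and why tb_plug_events_active() addresses DWORD 4 of the switch header both via ((u32 *) &sw->config) + 4 and via offset 4, length 1. A userspace sketch of that dword addressing, not part of the patch and using a simplified stand-in struct:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* stand-in for struct tb_regs_switch_header: 5 dwords, no bitfields */
struct demo_switch_header {
	uint32_t dwords[5];
};

int main(void)
{
	struct demo_switch_header cfg;
	uint32_t *dword4;

	memset(&cfg, 0, sizeof(cfg));

	/* same pointer arithmetic as tb_plug_events_active() */
	dword4 = (uint32_t *) &cfg + 4;
	*dword4 = 0xff; /* low byte corresponds to plug_events_delay = 255ms */

	printf("header is %zu bytes (%zu dwords)\n",
	       sizeof(cfg), sizeof(cfg) / 4);
	printf("dword 4 = %#x\n", cfg.dwords[4]); /* prints 0xff */
	return 0;
}
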
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
new file mode 100644 (file)
index 0000000..baf1cd3
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Thunderbolt Cactus Ridge driver - PCIe tunnel
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tunnel_pci.h"
+#include "tb.h"
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
+       do {                                                            \
+               struct tb_pci_tunnel *__tunnel = (tunnel);              \
+               level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt,  \
+                     tb_route(__tunnel->down_port->sw),                \
+                     __tunnel->down_port->port,                        \
+                     tb_route(__tunnel->up_port->sw),                  \
+                     __tunnel->up_port->port,                          \
+                     ## arg);                                          \
+       } while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+       __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+       __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+       __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+       path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+       path->priority = 3;
+       path->weight = 1;
+       path->drop_packages = 0;
+       path->nfc_credits = 0;
+}
+
+/**
+ * tb_pci_alloc() - allocate a pci tunnel
+ *
+ * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Currently only paths consisting of two hops are supported (that is, the
+ * ports must be on "adjacent" switches).
+ *
+ * The paths are hard-coded to use hop 8 (the only working hop id available on
+ * my thunderbolt devices). Therefore at most ONE path per device may be
+ * activated.
+ *
+ * Return: Returns a tb_pci_tunnel on success or NULL on failure.
+ */
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+                                  struct tb_port *down)
+{
+       struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+       if (!tunnel)
+               goto err;
+       tunnel->tb = tb;
+       tunnel->down_port = down;
+       tunnel->up_port = up;
+       INIT_LIST_HEAD(&tunnel->list);
+       tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
+       if (!tunnel->path_to_up)
+               goto err;
+       tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
+       if (!tunnel->path_to_down)
+               goto err;
+       tb_pci_init_path(tunnel->path_to_up);
+       tb_pci_init_path(tunnel->path_to_down);
+
+       tunnel->path_to_up->hops[0].in_port = down;
+       tunnel->path_to_up->hops[0].in_hop_index = 8;
+       tunnel->path_to_up->hops[0].in_counter_index = -1;
+       tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
+       tunnel->path_to_up->hops[0].next_hop_index = 8;
+
+       tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
+       tunnel->path_to_up->hops[1].in_hop_index = 8;
+       tunnel->path_to_up->hops[1].in_counter_index = -1;
+       tunnel->path_to_up->hops[1].out_port = up;
+       tunnel->path_to_up->hops[1].next_hop_index = 8;
+
+       tunnel->path_to_down->hops[0].in_port = up;
+       tunnel->path_to_down->hops[0].in_hop_index = 8;
+       tunnel->path_to_down->hops[0].in_counter_index = -1;
+       tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
+       tunnel->path_to_down->hops[0].next_hop_index = 8;
+
+       tunnel->path_to_down->hops[1].in_port =
+               tb_upstream_port(up->sw)->remote;
+       tunnel->path_to_down->hops[1].in_hop_index = 8;
+       tunnel->path_to_down->hops[1].in_counter_index = -1;
+       tunnel->path_to_down->hops[1].out_port = down;
+       tunnel->path_to_down->hops[1].next_hop_index = 8;
+       return tunnel;
+
+err:
+       if (tunnel) {
+               if (tunnel->path_to_down)
+                       tb_path_free(tunnel->path_to_down);
+               if (tunnel->path_to_up)
+                       tb_path_free(tunnel->path_to_up);
+               kfree(tunnel);
+       }
+       return NULL;
+}
+
+/**
+ * tb_pci_free() - free a tunnel
+ *
+ * The tunnel must have been deactivated.
+ */
+void tb_pci_free(struct tb_pci_tunnel *tunnel)
+{
+       if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+               tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
+               return;
+       }
+       tb_path_free(tunnel->path_to_up);
+       tb_path_free(tunnel->path_to_down);
+       kfree(tunnel);
+}
+
+/**
+ * tb_pci_is_invalid() - check whether an activated path is still valid
+ */
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
+{
+       WARN_ON(!tunnel->path_to_up->activated);
+       WARN_ON(!tunnel->path_to_down->activated);
+
+       return tb_path_is_invalid(tunnel->path_to_up)
+              || tb_path_is_invalid(tunnel->path_to_down);
+}
+
+/**
+ * tb_pci_port_active() - activate/deactivate PCI capability
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_pci_port_active(struct tb_port *port, bool active)
+{
+       u32 word = active ? 0x80000000 : 0x0;
+       int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
+       if (cap <= 0) {
+               tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
+               return cap ? cap : -ENXIO;
+       }
+       return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
+}
+
+/**
+ * tb_pci_restart() - activate a tunnel after a hardware reset
+ */
+int tb_pci_restart(struct tb_pci_tunnel *tunnel)
+{
+       int res;
+       tunnel->path_to_up->activated = false;
+       tunnel->path_to_down->activated = false;
+
+       tb_tunnel_info(tunnel, "activating\n");
+
+       res = tb_path_activate(tunnel->path_to_up);
+       if (res)
+               goto err;
+       res = tb_path_activate(tunnel->path_to_down);
+       if (res)
+               goto err;
+
+       res = tb_pci_port_active(tunnel->down_port, true);
+       if (res)
+               goto err;
+
+       res = tb_pci_port_active(tunnel->up_port, true);
+       if (res)
+               goto err;
+       return 0;
+err:
+       tb_tunnel_warn(tunnel, "activation failed\n");
+       tb_pci_deactivate(tunnel);
+       return res;
+}
+
+/**
+ * tb_pci_activate() - activate a tunnel
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_pci_activate(struct tb_pci_tunnel *tunnel)
+{
+       int res;
+       if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+               tb_tunnel_WARN(tunnel,
+                              "trying to activate an already activated tunnel\n");
+               return -EINVAL;
+       }
+
+       res = tb_pci_restart(tunnel);
+       if (res)
+               return res;
+
+       list_add(&tunnel->list, &tunnel->tb->tunnel_list);
+       return 0;
+}
+
+
+
+/**
+ * tb_pci_deactivate() - deactivate a tunnel
+ */
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
+{
+       tb_tunnel_info(tunnel, "deactivating\n");
+       /*
+        * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
+        * port. Seems to have no effect?
+        */
+       tb_pci_port_active(tunnel->up_port, false);
+       tb_pci_port_active(tunnel->down_port, false);
+       if (tunnel->path_to_down->activated)
+               tb_path_deactivate(tunnel->path_to_down);
+       if (tunnel->path_to_up->activated)
+               tb_path_deactivate(tunnel->path_to_up);
+       list_del_init(&tunnel->list);
+}
+
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
new file mode 100644 (file)
index 0000000..a67f93c
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Thunderbolt Cactus Ridge driver - PCIe tunnel
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_PCI_H_
+#define TB_PCI_H_
+
+#include "tb.h"
+
+struct tb_pci_tunnel {
+       struct tb *tb;
+       struct tb_port *up_port;
+       struct tb_port *down_port;
+       struct tb_path *path_to_up;
+       struct tb_path *path_to_down;
+       struct list_head list;
+};
+
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+                                  struct tb_port *down);
+void tb_pci_free(struct tb_pci_tunnel *tunnel);
+int tb_pci_activate(struct tb_pci_tunnel *tunnel);
+int tb_pci_restart(struct tb_pci_tunnel *tunnel);
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
+
+#endif
+
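
The header above is the complete PCIe tunnel API; the in-tree caller is tb_activate_pcie_devices() in tb.c. Purely for illustration, a hypothetical caller of the alloc/activate/free contract could look like this (example_tunnel and its arguments are made up):

#include "tunnel_pci.h"

/* illustrative only: pair a PCIe up port with a free PCIe down port */
static void example_tunnel(struct tb *tb, struct tb_port *up,
			   struct tb_port *down)
{
	struct tb_pci_tunnel *tunnel = tb_pci_alloc(tb, up, down);

	if (!tunnel)
		return;
	if (tb_pci_activate(tunnel))
		tb_pci_free(tunnel); /* activation failed, discard the tunnel */
	/* on success the tunnel is now on tb->tunnel_list */
}
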
index 96c4a19..c28d6e2 100644 (file)
@@ -91,8 +91,7 @@ static irqreturn_t pruss_handler(int irq, struct uio_info *info)
        return IRQ_HANDLED;
 }
 
-static void pruss_cleanup(struct platform_device *dev,
-                       struct uio_pruss_dev *gdev)
+static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
 {
        int cnt;
        struct uio_info *p = gdev->info;
@@ -103,7 +102,7 @@ static void pruss_cleanup(struct platform_device *dev,
        }
        iounmap(gdev->prussio_vaddr);
        if (gdev->ddr_vaddr) {
-               dma_free_coherent(&dev->dev, extram_pool_sz, gdev->ddr_vaddr,
+               dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
                        gdev->ddr_paddr);
        }
        if (gdev->sram_vaddr)
@@ -115,13 +114,14 @@ static void pruss_cleanup(struct platform_device *dev,
        kfree(gdev);
 }
 
-static int pruss_probe(struct platform_device *dev)
+static int pruss_probe(struct platform_device *pdev)
 {
        struct uio_info *p;
        struct uio_pruss_dev *gdev;
        struct resource *regs_prussio;
+       struct device *dev = &pdev->dev;
        int ret = -ENODEV, cnt = 0, len;
-       struct uio_pruss_pdata *pdata = dev_get_platdata(&dev->dev);
+       struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
 
        gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
        if (!gdev)
@@ -132,10 +132,11 @@ static int pruss_probe(struct platform_device *dev)
                kfree(gdev);
                return -ENOMEM;
        }
+
        /* Power on PRU in case its not done as part of boot-loader */
-       gdev->pruss_clk = clk_get(&dev->dev, "pruss");
+       gdev->pruss_clk = clk_get(dev, "pruss");
        if (IS_ERR(gdev->pruss_clk)) {
-               dev_err(&dev->dev, "Failed to get clock\n");
+               dev_err(dev, "Failed to get clock\n");
                ret = PTR_ERR(gdev->pruss_clk);
                kfree(gdev->info);
                kfree(gdev);
@@ -144,14 +145,14 @@ static int pruss_probe(struct platform_device *dev)
                clk_enable(gdev->pruss_clk);
        }
 
-       regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0);
+       regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs_prussio) {
-               dev_err(&dev->dev, "No PRUSS I/O resource specified\n");
+               dev_err(dev, "No PRUSS I/O resource specified\n");
                goto out_free;
        }
 
        if (!regs_prussio->start) {
-               dev_err(&dev->dev, "Invalid memory resource\n");
+               dev_err(dev, "Invalid memory resource\n");
                goto out_free;
        }
 
@@ -161,27 +162,27 @@ static int pruss_probe(struct platform_device *dev)
                        (unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
                                        sram_pool_sz, &gdev->sram_paddr);
                if (!gdev->sram_vaddr) {
-                       dev_err(&dev->dev, "Could not allocate SRAM pool\n");
+                       dev_err(dev, "Could not allocate SRAM pool\n");
                        goto out_free;
                }
        }
 
-       gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
+       gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz,
                                &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
        if (!gdev->ddr_vaddr) {
-               dev_err(&dev->dev, "Could not allocate external memory\n");
+               dev_err(dev, "Could not allocate external memory\n");
                goto out_free;
        }
 
        len = resource_size(regs_prussio);
        gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
        if (!gdev->prussio_vaddr) {
-               dev_err(&dev->dev, "Can't remap PRUSS I/O  address range\n");
+               dev_err(dev, "Can't remap PRUSS I/O  address range\n");
                goto out_free;
        }
 
        gdev->pintc_base = pdata->pintc_base;
-       gdev->hostirq_start = platform_get_irq(dev, 0);
+       gdev->hostirq_start = platform_get_irq(pdev, 0);
 
        for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
                p->mem[0].addr = regs_prussio->start;
@@ -204,12 +205,12 @@ static int pruss_probe(struct platform_device *dev)
                p->handler = pruss_handler;
                p->priv = gdev;
 
-               ret = uio_register_device(&dev->dev, p);
+               ret = uio_register_device(dev, p);
                if (ret < 0)
                        goto out_free;
        }
 
-       platform_set_drvdata(dev, gdev);
+       platform_set_drvdata(pdev, gdev);
        return 0;
 
 out_free:
@@ -221,7 +222,7 @@ static int pruss_remove(struct platform_device *dev)
 {
        struct uio_pruss_dev *gdev = platform_get_drvdata(dev);
 
-       pruss_cleanup(dev, gdev);
+       pruss_cleanup(&dev->dev, gdev);
        return 0;
 }
 
index 02a7c79..d46b12d 100644 (file)
@@ -360,7 +360,6 @@ static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
  */
 #define CA91CX42_DCTL_L2V              (1<<31)
 #define CA91CX42_DCTL_VDW_M            (3<<22)
-#define CA91CX42_DCTL_VDW_M            (3<<22)
 #define CA91CX42_DCTL_VDW_D8           0
 #define CA91CX42_DCTL_VDW_D16          (1<<22)
 #define CA91CX42_DCTL_VDW_D32          (1<<23)
index 02df3b1..e0b8a4b 100644 (file)
@@ -563,7 +563,7 @@ static struct platform_driver ds1wm_driver = {
 
 static int __init ds1wm_init(void)
 {
-       printk("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n");
+       pr_info("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n");
        return platform_driver_register(&ds1wm_driver);
 }
 
index e033491..e76a9b3 100644 (file)
@@ -226,7 +226,7 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
        }
 
        if (retries >= DS2482_WAIT_IDLE_TIMEOUT)
-               printk(KERN_ERR "%s: timeout on channel %d\n",
+               pr_err("%s: timeout on channel %d\n",
                       __func__, pdev->channel);
 
        return temp;
index 7404ad3..1de6df8 100644 (file)
@@ -206,7 +206,7 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n",
+               pr_err("Failed to send command control message %x.%x: err=%d.\n",
                                value, index, err);
                return err;
        }
@@ -221,7 +221,7 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n",
+               pr_err("Failed to send mode control message %x.%x: err=%d.\n",
                                value, index, err);
                return err;
        }
@@ -236,7 +236,7 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n",
+               pr_err("Failed to send control message %x.%x: err=%d.\n",
                                value, index, err);
                return err;
        }
@@ -255,7 +255,8 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
        err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
                dev->ep[EP_STATUS]), buf, size, &count, 100);
        if (err < 0) {
-               printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
+               pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
+                      dev->ep[EP_STATUS], err);
                return err;
        }
 
@@ -267,17 +268,17 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
 
 static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
 {
-       printk(KERN_INFO "%45s: %8x\n", str, buf[off]);
+       pr_info("%45s: %8x\n", str, buf[off]);
 }
 
 static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
 {
        int i;
 
-       printk(KERN_INFO "0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
+       pr_info("0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
        for (i=0; i<count; ++i)
-               printk("%02x ", buf[i]);
-       printk(KERN_INFO "\n");
+               pr_info("%02x ", buf[i]);
+       pr_info("\n");
 
        if (count >= 16) {
                ds_print_msg(buf, "enable flag", 0);
@@ -305,21 +306,21 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
                }
                ds_print_msg(buf, "Result Register Value: ", i);
                if (buf[i] & RR_NRS)
-                       printk(KERN_INFO "NRS: Reset no presence or ...\n");
+                       pr_info("NRS: Reset no presence or ...\n");
                if (buf[i] & RR_SH)
-                       printk(KERN_INFO "SH: short on reset or set path\n");
+                       pr_info("SH: short on reset or set path\n");
                if (buf[i] & RR_APP)
-                       printk(KERN_INFO "APP: alarming presence on reset\n");
+                       pr_info("APP: alarming presence on reset\n");
                if (buf[i] & RR_VPP)
-                       printk(KERN_INFO "VPP: 12V expected not seen\n");
+                       pr_info("VPP: 12V expected not seen\n");
                if (buf[i] & RR_CMP)
-                       printk(KERN_INFO "CMP: compare error\n");
+                       pr_info("CMP: compare error\n");
                if (buf[i] & RR_CRC)
-                       printk(KERN_INFO "CRC: CRC error detected\n");
+                       pr_info("CRC: CRC error detected\n");
                if (buf[i] & RR_RDP)
-                       printk(KERN_INFO "RDP: redirected page\n");
+                       pr_info("RDP: redirected page\n");
                if (buf[i] & RR_EOS)
-                       printk(KERN_INFO "EOS: end of search error\n");
+                       pr_info("EOS: end of search error\n");
        }
 }
 
@@ -330,15 +331,13 @@ static void ds_reset_device(struct ds_device *dev)
         * the strong pullup.
         */
        if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
-               printk(KERN_ERR "ds_reset_device: "
-                       "Error allowing strong pullup\n");
+               pr_err("ds_reset_device: Error allowing strong pullup\n");
        /* Chip strong pullup time was cleared. */
        if (dev->spu_sleep) {
                /* lower 4 bits are 0, see ds_set_pullup */
                u8 del = dev->spu_sleep>>4;
                if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
-                       printk(KERN_ERR "ds_reset_device: "
-                               "Error setting duration\n");
+                       pr_err("ds_reset_device: Error setting duration\n");
        }
 }
 
@@ -363,7 +362,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
                u8 buf[ST_SIZE];
                int count;
 
-               printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
+               pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
                usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
 
                count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
@@ -391,7 +390,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
        count = 0;
        err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
        if (err < 0) {
-               printk(KERN_ERR "Failed to write 1-wire data to ep0x%x: "
+               pr_err("Failed to write 1-wire data to ep0x%x: "
                        "err=%d.\n", dev->ep[EP_DATA_OUT], err);
                return err;
        }
@@ -475,7 +474,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
        } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
 
        if (err >= 16 && st->status & ST_EPOF) {
-               printk(KERN_INFO "Resetting device after ST_EPOF.\n");
+               pr_info("Resetting device after ST_EPOF.\n");
                ds_reset_device(dev);
                /* Always dump the device status. */
                count = 101;
@@ -992,7 +991,7 @@ static int ds_probe(struct usb_interface *intf,
 
        dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
        if (!dev) {
-               printk(KERN_INFO "Failed to allocate new DS9490R structure.\n");
+               pr_info("Failed to allocate new DS9490R structure.\n");
                return -ENOMEM;
        }
        dev->udev = usb_get_dev(udev);
@@ -1024,7 +1023,8 @@ static int ds_probe(struct usb_interface *intf,
 
        iface_desc = &intf->altsetting[alt];
        if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
-               printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints);
+               pr_info("Num endpoints=%d. It is not DS9490R.\n",
+                       iface_desc->desc.bNumEndpoints);
                err = -EINVAL;
                goto err_out_clear;
        }
index a5df5e8..da3d0f0 100644 (file)
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
 #include "../w1.h"
 #include "../w1_int.h"
 
-/* According to the mx27 Datasheet the reset procedure should take up to about
- * 1350us. We set the timeout to 500*100us = 50ms for sure */
-#define MXC_W1_RESET_TIMEOUT 500
-
 /*
  * MXC W1 Register offsets
  */
@@ -35,6 +32,7 @@
 # define MXC_W1_CONTROL_RPP    BIT(7)
 #define MXC_W1_TIME_DIVIDER    0x02
 #define MXC_W1_RESET           0x04
+# define MXC_W1_RESET_RST      BIT(0)
 
 struct mxc_w1_device {
        void __iomem *regs;
@@ -49,24 +47,25 @@ struct mxc_w1_device {
  */
 static u8 mxc_w1_ds2_reset_bus(void *data)
 {
-       u8 reg_val;
-       unsigned int timeout_cnt = 0;
        struct mxc_w1_device *dev = data;
+       unsigned long timeout;
+
+       writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
 
-       writeb(MXC_W1_CONTROL_RPP, (dev->regs + MXC_W1_CONTROL));
+       /* Wait for reset sequence 511+512us, use 1500us for sure */
+       timeout = jiffies + usecs_to_jiffies(1500);
 
-       while (1) {
-               reg_val = readb(dev->regs + MXC_W1_CONTROL);
+       udelay(511 + 512);
 
-               if (!(reg_val & MXC_W1_CONTROL_RPP) ||
-                   timeout_cnt > MXC_W1_RESET_TIMEOUT)
-                       break;
-               else
-                       timeout_cnt++;
+       do {
+               u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
 
-               udelay(100);
-       }
-       return !(reg_val & MXC_W1_CONTROL_PST);
+               /* PST bit is valid after the RPP bit is self-cleared */
+               if (!(ctrl & MXC_W1_CONTROL_RPP))
+                       return !(ctrl & MXC_W1_CONTROL_PST);
+       } while (time_is_after_jiffies(timeout));
+
+       return 1;
 }
 
 /*
@@ -76,22 +75,25 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
  */
 static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
 {
-       struct mxc_w1_device *mdev = data;
-       void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL;
-       unsigned int timeout_cnt = 400; /* Takes max. 120us according to
-                                        * datasheet.
-                                        */
+       struct mxc_w1_device *dev = data;
+       unsigned long timeout;
 
-       writeb(MXC_W1_CONTROL_WR(bit), ctrl_addr);
+       writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
 
-       while (timeout_cnt--) {
-               if (!(readb(ctrl_addr) & MXC_W1_CONTROL_WR(bit)))
-                       break;
+       /* Wait for read/write bit (60us, Max 120us), use 200us for sure */
+       timeout = jiffies + usecs_to_jiffies(200);
 
-               udelay(1);
-       }
+       udelay(60);
 
-       return !!(readb(ctrl_addr) & MXC_W1_CONTROL_RDST);
+       do {
+               u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
+
+               /* RDST bit is valid after the WR1/RD bit is self-cleared */
+               if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
+                       return !!(ctrl & MXC_W1_CONTROL_RDST);
+       } while (time_is_after_jiffies(timeout));
+
+       return 0;
 }
 
 static int mxc_w1_probe(struct platform_device *pdev)
@@ -131,6 +133,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       /* Software reset 1-Wire module */
+       writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+       writeb(0, mdev->regs + MXC_W1_RESET);
+
        writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
 
        mdev->bus_master.data = mdev;
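
Both converted mxc_w1 helpers above follow the same shape: perform the guaranteed minimum wait with udelay(), then poll the control register until the hardware self-clears the busy bit or a jiffies deadline passes. A minimal sketch of that pattern, not part of the patch; the register pointer and BUSY_FLAG are placeholders standing in for the real MXC accessors:

    /* Needs <linux/io.h>, <linux/delay.h>, <linux/jiffies.h>, <linux/bits.h>. */
    #define BUSY_FLAG  BIT(7)   /* placeholder: e.g. MXC_W1_CONTROL_RPP */

    static int poll_until_idle(void __iomem *reg, unsigned int min_us,
                               unsigned int timeout_us)
    {
            unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

            udelay(min_us);                 /* hardware needs at least this long */

            do {
                    if (!(readb(reg) & BUSY_FLAG))  /* flag self-cleared: done */
                            return 0;
            } while (time_is_after_jiffies(deadline));

            return -ETIMEDOUT;
    }

The jiffies deadline only bounds the polling loop; the fixed udelay() covers the datasheet minimum, which is why the loop can return as soon as the flag clears.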
index 1cdce80..cfe74d0 100644 (file)
@@ -38,6 +38,14 @@ config W1_SLAVE_DS2413
          Say Y here if you want to use a 1-wire
          DS2413 Dual Channel Addressable Switch device support
 
+config W1_SLAVE_DS2406
+       tristate "Dual Channel Addressable Switch 0x12 family support (DS2406)"
+       select CRC16
+       help
+         Say Y or M here if you want to use a 1-wire
+         DS2406 Dual Channel Addressable Switch.  EPROM read/write
+         support for these devices is not implemented.
+
 config W1_SLAVE_DS2423
        tristate "Counter 1-wire device (DS2423)"
        select CRC16
index 06529f3..1e9989a 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_W1_SLAVE_THERM)    += w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)    += w1_smem.o
 obj-$(CONFIG_W1_SLAVE_DS2408)  += w1_ds2408.o
 obj-$(CONFIG_W1_SLAVE_DS2413)  += w1_ds2413.o
+obj-$(CONFIG_W1_SLAVE_DS2406)  += w1_ds2406.o
 obj-$(CONFIG_W1_SLAVE_DS2423)  += w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)  += w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)  += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
new file mode 100644 (file)
index 0000000..d488961
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * w1_ds2406.c - w1 family 12 (DS2406) driver
+ * based on w1_ds2413.c by Mariusz Bialonczyk <manio@skyboo.net>
+ *
+ * Copyright (c) 2014 Scott Alfter <scott@alfter.us>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/crc16.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
+MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
+
+#define W1_F12_FUNC_READ_STATUS                   0xAA
+#define W1_F12_FUNC_WRITE_STATUS          0x55
+
+static ssize_t w1_f12_read_state(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       u8 w1_buf[6]={W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       u16 crc=0;
+       int i;
+       ssize_t rtnval=1;
+
+       if (off != 0)
+               return 0;
+       if (!buf)
+               return -EINVAL;
+
+       mutex_lock(&sl->master->bus_mutex);
+
+       if (w1_reset_select_slave(sl)) {
+               mutex_unlock(&sl->master->bus_mutex);
+               return -EIO;
+       }
+
+       w1_write_block(sl->master, w1_buf, 3);
+       w1_read_block(sl->master, w1_buf+3, 3);
+       for (i=0; i<6; i++)
+               crc=crc16_byte(crc, w1_buf[i]);
+       if (crc==0xb001) /* good read? */
+               *buf=((w1_buf[3]>>5)&3)|0x30;
+       else
+               rtnval=-EIO;
+
+       mutex_unlock(&sl->master->bus_mutex);
+
+       return rtnval;
+}
+
+static ssize_t w1_f12_write_output(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       u8 w1_buf[6]={W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
+       u16 crc=0;
+       int i;
+       ssize_t rtnval=1;
+
+       if (count != 1 || off != 0)
+               return -EFAULT;
+
+       mutex_lock(&sl->master->bus_mutex);
+
+       if (w1_reset_select_slave(sl)) {
+               mutex_unlock(&sl->master->bus_mutex);
+               return -EIO;
+       }
+
+       w1_buf[3] = (((*buf)&3)<<5)|0x1F;
+       w1_write_block(sl->master, w1_buf, 4);
+       w1_read_block(sl->master, w1_buf+4, 2);
+       for (i=0; i<6; i++)
+               crc=crc16_byte(crc, w1_buf[i]);
+       if (crc==0xb001) /* good read? */
+               w1_write_8(sl->master, 0xFF);
+       else
+               rtnval=-EIO;
+
+       mutex_unlock(&sl->master->bus_mutex);
+       return rtnval;
+}
+
+#define NB_SYSFS_BIN_FILES 2
+static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+       {
+               .attr = {
+                       .name = "state",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f12_read_state,
+       },
+       {
+               .attr = {
+                       .name = "output",
+                       .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+               },
+               .size = 1,
+               .write = w1_f12_write_output,
+       }
+};
+
+static int w1_f12_add_slave(struct w1_slave *sl)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
+               err = sysfs_create_bin_file(
+                       &sl->dev.kobj,
+                       &(w1_f12_sysfs_bin_files[i]));
+       if (err)
+               while (--i >= 0)
+                       sysfs_remove_bin_file(&sl->dev.kobj,
+                               &(w1_f12_sysfs_bin_files[i]));
+       return err;
+}
+
+static void w1_f12_remove_slave(struct w1_slave *sl)
+{
+       int i;
+       for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
+               sysfs_remove_bin_file(&sl->dev.kobj,
+                       &(w1_f12_sysfs_bin_files[i]));
+}
+
+static struct w1_family_ops w1_f12_fops = {
+       .add_slave      = w1_f12_add_slave,
+       .remove_slave   = w1_f12_remove_slave,
+};
+
+static struct w1_family w1_family_12 = {
+       .fid = W1_FAMILY_DS2406,
+       .fops = &w1_f12_fops,
+};
+
+static int __init w1_f12_init(void)
+{
+       return w1_register_family(&w1_family_12);
+}
+
+static void __exit w1_f12_exit(void)
+{
+       w1_unregister_family(&w1_family_12);
+}
+
+module_init(w1_f12_init);
+module_exit(w1_f12_exit);
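
Both transfer paths in the new driver validate the exchange by running crc16_byte() over the command bytes plus the two CRC bytes returned by the device and comparing against the constant 0xb001. The DS2406 transmits the bitwise complement of its CRC, and feeding that complement back through the same CRC always leaves this fixed residue. A small self-contained sketch (userspace, with a local copy of the reflected CRC-16 step instead of linux/crc16.h) demonstrating why the comparison works:

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-16, poly 0xA001: same update step as the kernel's crc16_byte(). */
    static uint16_t crc16_byte(uint16_t crc, uint8_t data)
    {
            int i;

            crc ^= data;
            for (i = 0; i < 8; i++)
                    crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
            return crc;
    }

    int main(void)
    {
            uint8_t msg[] = { 0xAA, 0x07, 0x00 };   /* e.g. READ_STATUS command bytes */
            uint16_t crc = 0, check = 0;
            uint8_t inv_lo, inv_hi;
            unsigned int i;

            for (i = 0; i < sizeof(msg); i++)
                    crc = crc16_byte(crc, msg[i]);

            /* The device returns the complemented CRC, LSB first. */
            inv_lo = ~crc & 0xff;
            inv_hi = (~crc >> 8) & 0xff;

            for (i = 0; i < sizeof(msg); i++)
                    check = crc16_byte(check, msg[i]);
            check = crc16_byte(check, inv_lo);
            check = crc16_byte(check, inv_hi);

            /* Always 0xB001, regardless of the message contents. */
            printf("residue = 0x%04x\n", check);
            return check == 0xB001 ? 0 : 1;
    }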
index 65f90dc..d9079d4 100644 (file)
@@ -181,8 +181,7 @@ static struct w1_family w1_ds2760_family = {
 
 static int __init w1_ds2760_init(void)
 {
-       printk(KERN_INFO "1-Wire driver for the DS2760 battery monitor "
-              " chip  - (c) 2004-2005, Szabolcs Gyurko\n");
+       pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n");
        ida_init(&bat_ida);
        return w1_register_family(&w1_ds2760_family);
 }
index 5d73415..592f7ed 100644 (file)
@@ -1162,28 +1162,26 @@ static int __init w1_init(void)
 {
        int retval;
 
-       printk(KERN_INFO "Driver for 1-wire Dallas network protocol.\n");
+       pr_info("Driver for 1-wire Dallas network protocol.\n");
 
        w1_init_netlink();
 
        retval = bus_register(&w1_bus_type);
        if (retval) {
-               printk(KERN_ERR "Failed to register bus. err=%d.\n", retval);
+               pr_err("Failed to register bus. err=%d.\n", retval);
                goto err_out_exit_init;
        }
 
        retval = driver_register(&w1_master_driver);
        if (retval) {
-               printk(KERN_ERR
-                       "Failed to register master driver. err=%d.\n",
+               pr_err("Failed to register master driver. err=%d.\n",
                        retval);
                goto err_out_bus_unregister;
        }
 
        retval = driver_register(&w1_slave_driver);
        if (retval) {
-               printk(KERN_ERR
-                       "Failed to register slave driver. err=%d.\n",
+               pr_err("Failed to register slave driver. err=%d.\n",
                        retval);
                goto err_out_master_unregister;
        }
index 3651ec8..1dc3051 100644 (file)
@@ -87,7 +87,7 @@ void w1_unregister_family(struct w1_family *fent)
        w1_reconnect_slaves(fent, 0);
 
        while (atomic_read(&fent->refcnt)) {
-               printk(KERN_INFO "Waiting for family %u to become free: refcnt=%d.\n",
+               pr_info("Waiting for family %u to become free: refcnt=%d.\n",
                                fent->fid, atomic_read(&fent->refcnt));
 
                if (msleep_interruptible(1000))
index 26ca134..0d18365 100644 (file)
@@ -40,6 +40,7 @@
 #define W1_FAMILY_DS2760       0x30
 #define W1_FAMILY_DS2780       0x32
 #define W1_FAMILY_DS2413       0x3A
+#define W1_FAMILY_DS2406       0x12
 #define W1_THERM_DS1825                0x3B
 #define W1_FAMILY_DS2781       0x3D
 #define W1_THERM_DS28EA00      0x42
index 728039d..47249a3 100644 (file)
@@ -38,7 +38,7 @@ module_param_named(search_count, w1_search_count, int, 0);
 static int w1_enable_pullup = 1;
 module_param_named(enable_pullup, w1_enable_pullup, int, 0);
 
-static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
+static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
                                       struct device_driver *driver,
                                       struct device *device)
 {
@@ -50,8 +50,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
         */
        dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
        if (!dev) {
-               printk(KERN_ERR
-                       "Failed to allocate %zd bytes for new w1 device.\n",
+               pr_err("Failed to allocate %zd bytes for new w1 device.\n",
                        sizeof(struct w1_master));
                return NULL;
        }
@@ -91,7 +90,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
 
        err = device_register(&dev->dev);
        if (err) {
-               printk(KERN_ERR "Failed to register master device. err=%d\n", err);
+               pr_err("Failed to register master device. err=%d\n", err);
                memset(dev, 0, sizeof(struct w1_master));
                kfree(dev);
                dev = NULL;
@@ -116,13 +115,13 @@ int w1_add_master_device(struct w1_bus_master *master)
        struct w1_netlink_msg msg;
        int id, found;
 
-        /* validate minimum functionality */
-        if (!(master->touch_bit && master->reset_bus) &&
-            !(master->write_bit && master->read_bit) &&
+       /* validate minimum functionality */
+       if (!(master->touch_bit && master->reset_bus) &&
+           !(master->write_bit && master->read_bit) &&
            !(master->write_byte && master->read_byte && master->reset_bus)) {
-               printk(KERN_ERR "w1_add_master_device: invalid function set\n");
+               pr_err("w1_add_master_device: invalid function set\n");
                return(-EINVAL);
-        }
+       }
 
        /* Lock until the device is added (or not) to w1_masters. */
        mutex_lock(&w1_mlock);
@@ -254,7 +253,7 @@ void w1_remove_master_device(struct w1_bus_master *bm)
        }
 
        if (!found) {
-               printk(KERN_ERR "Device doesn't exist.\n");
+               pr_err("Device doesn't exist.\n");
                return;
        }
 
index 9c7bd62..f9eecff 100644 (file)
@@ -29,8 +29,8 @@
 #else
 #  define assert(expr) \
         if(unlikely(!(expr))) {                                        \
-        printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n",        \
-       #expr, __FILE__, __func__, __LINE__);                   \
+               pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
+               #expr, __FILE__, __func__, __LINE__);           \
         }
 #endif
 
index 351a297..dd96562 100644 (file)
@@ -680,8 +680,7 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
                        if (sl)
                                dev = sl->master;
                } else {
-                       printk(KERN_NOTICE
-                               "%s: cn: %x.%x, wrong type: %u, len: %u.\n",
+                       pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
                                __func__, cn->id.idx, cn->id.val,
                                msg->type, msg->len);
                        err = -EPROTO;
index c1c0b0c..5ba0360 100644 (file)
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;         \
                *(.pci_fixup_suspend)                                   \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;           \
+               VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;    \
+               *(.pci_fixup_suspend_late)                              \
+               VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;      \
        }                                                               \
                                                                        \
        /* Built-in firmware blobs */                                   \
diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h
new file mode 100644 (file)
index 0000000..030526b
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * sm5502.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_EXTCON_SM5502_H
+#define __LINUX_EXTCON_SM5502_H
+
+enum sm5502_types {
+       TYPE_SM5502,
+};
+
+/* SM5502 registers */
+enum sm5502_reg {
+       SM5502_REG_DEVICE_ID = 0x01,
+       SM5502_REG_CONTROL,
+       SM5502_REG_INT1,
+       SM5502_REG_INT2,
+       SM5502_REG_INTMASK1,
+       SM5502_REG_INTMASK2,
+       SM5502_REG_ADC,
+       SM5502_REG_TIMING_SET1,
+       SM5502_REG_TIMING_SET2,
+       SM5502_REG_DEV_TYPE1,
+       SM5502_REG_DEV_TYPE2,
+       SM5502_REG_BUTTON1,
+       SM5502_REG_BUTTON2,
+       SM5502_REG_CAR_KIT_STATUS,
+       SM5502_REG_RSVD1,
+       SM5502_REG_RSVD2,
+       SM5502_REG_RSVD3,
+       SM5502_REG_RSVD4,
+       SM5502_REG_MANUAL_SW1,
+       SM5502_REG_MANUAL_SW2,
+       SM5502_REG_DEV_TYPE3,
+       SM5502_REG_RSVD5,
+       SM5502_REG_RSVD6,
+       SM5502_REG_RSVD7,
+       SM5502_REG_RSVD8,
+       SM5502_REG_RSVD9,
+       SM5502_REG_RESET,
+       SM5502_REG_RSVD10,
+       SM5502_REG_RESERVED_ID1,
+       SM5502_REG_RSVD11,
+       SM5502_REG_RSVD12,
+       SM5502_REG_RESERVED_ID2,
+       SM5502_REG_RSVD13,
+       SM5502_REG_OCP,
+       SM5502_REG_RSVD14,
+       SM5502_REG_RSVD15,
+       SM5502_REG_RSVD16,
+       SM5502_REG_RSVD17,
+       SM5502_REG_RSVD18,
+       SM5502_REG_RSVD19,
+       SM5502_REG_RSVD20,
+       SM5502_REG_RSVD21,
+       SM5502_REG_RSVD22,
+       SM5502_REG_RSVD23,
+       SM5502_REG_RSVD24,
+       SM5502_REG_RSVD25,
+       SM5502_REG_RSVD26,
+       SM5502_REG_RSVD27,
+       SM5502_REG_RSVD28,
+       SM5502_REG_RSVD29,
+       SM5502_REG_RSVD30,
+       SM5502_REG_RSVD31,
+       SM5502_REG_RSVD32,
+       SM5502_REG_RSVD33,
+       SM5502_REG_RSVD34,
+       SM5502_REG_RSVD35,
+       SM5502_REG_RSVD36,
+       SM5502_REG_RESERVED_ID3,
+
+       SM5502_REG_END,
+};
+
+/* Define SM5502 MASK/SHIFT constant */
+#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT      0
+#define SM5502_REG_DEVICE_ID_VERSION_SHIFT     3
+#define SM5502_REG_DEVICE_ID_VENDOR_MASK       (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
+#define SM5502_REG_DEVICE_ID_VERSION_MASK      (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
+
+#define SM5502_REG_CONTROL_MASK_INT_SHIFT      0
+#define SM5502_REG_CONTROL_WAIT_SHIFT          1
+#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT     2
+#define SM5502_REG_CONTROL_RAW_DATA_SHIFT      3
+#define SM5502_REG_CONTROL_SW_OPEN_SHIFT       4
+#define SM5502_REG_CONTROL_MASK_INT_MASK       (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
+#define SM5502_REG_CONTROL_WAIT_MASK           (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
+#define SM5502_REG_CONTROL_MANUAL_SW_MASK      (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
+#define SM5502_REG_CONTROL_RAW_DATA_MASK       (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
+#define SM5502_REG_CONTROL_SW_OPEN_MASK                (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
+
+#define SM5502_REG_INTM1_ATTACH_SHIFT          0
+#define SM5502_REG_INTM1_DETACH_SHIFT          1
+#define SM5502_REG_INTM1_KP_SHIFT              2
+#define SM5502_REG_INTM1_LKP_SHIFT             3
+#define SM5502_REG_INTM1_LKR_SHIFT             4
+#define SM5502_REG_INTM1_OVP_EVENT_SHIFT       5
+#define SM5502_REG_INTM1_OCP_EVENT_SHIFT       6
+#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT     7
+#define SM5502_REG_INTM1_ATTACH_MASK           (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
+#define SM5502_REG_INTM1_DETACH_MASK           (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
+#define SM5502_REG_INTM1_KP_MASK               (0x1 << SM5502_REG_INTM1_KP_SHIFT)
+#define SM5502_REG_INTM1_LKP_MASK              (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
+#define SM5502_REG_INTM1_LKR_MASK              (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
+#define SM5502_REG_INTM1_OVP_EVENT_MASK                (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
+#define SM5502_REG_INTM1_OCP_EVENT_MASK                (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
+#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK      (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
+
+#define SM5502_REG_INTM2_VBUS_DET_SHIFT                0
+#define SM5502_REG_INTM2_REV_ACCE_SHIFT                1
+#define SM5502_REG_INTM2_ADC_CHG_SHIFT         2
+#define SM5502_REG_INTM2_STUCK_KEY_SHIFT       3
+#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT   4
+#define SM5502_REG_INTM2_MHL_SHIFT             5
+#define SM5502_REG_INTM2_VBUS_DET_MASK         (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
+#define SM5502_REG_INTM2_REV_ACCE_MASK         (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
+#define SM5502_REG_INTM2_ADC_CHG_MASK          (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
+#define SM5502_REG_INTM2_STUCK_KEY_MASK                (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
+#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK    (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
+#define SM5502_REG_INTM2_MHL_MASK              (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
+
+#define SM5502_REG_ADC_SHIFT                   0
+#define SM5502_REG_ADC_MASK                    (0x1f << SM5502_REG_ADC_SHIFT)
+
+#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
+#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK  (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
+#define TIMING_KEY_PRESS_100MS                 0x0
+#define TIMING_KEY_PRESS_200MS                 0x1
+#define TIMING_KEY_PRESS_300MS                 0x2
+#define TIMING_KEY_PRESS_400MS                 0x3
+#define TIMING_KEY_PRESS_500MS                 0x4
+#define TIMING_KEY_PRESS_600MS                 0x5
+#define TIMING_KEY_PRESS_700MS                 0x6
+#define TIMING_KEY_PRESS_800MS                 0x7
+#define TIMING_KEY_PRESS_900MS                 0x8
+#define TIMING_KEY_PRESS_1000MS                        0x9
+#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT   0
+#define SM5502_REG_TIMING_SET1_ADC_DET_MASK    (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
+#define TIMING_ADC_DET_50MS                    0x0
+#define TIMING_ADC_DET_100MS                   0x1
+#define TIMING_ADC_DET_150MS                   0x2
+#define TIMING_ADC_DET_200MS                   0x3
+#define TIMING_ADC_DET_300MS                   0x4
+#define TIMING_ADC_DET_400MS                   0x5
+#define TIMING_ADC_DET_500MS                   0x6
+#define TIMING_ADC_DET_600MS                   0x7
+#define TIMING_ADC_DET_700MS                   0x8
+#define TIMING_ADC_DET_800MS                   0x9
+#define TIMING_ADC_DET_900MS                   0xA
+#define TIMING_ADC_DET_1000MS                  0xB
+
+#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT   4
+#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK    (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
+#define TIMING_SW_WAIT_10MS                    0x0
+#define TIMING_SW_WAIT_30MS                    0x1
+#define TIMING_SW_WAIT_50MS                    0x2
+#define TIMING_SW_WAIT_70MS                    0x3
+#define TIMING_SW_WAIT_90MS                    0x4
+#define TIMING_SW_WAIT_110MS                   0x5
+#define TIMING_SW_WAIT_130MS                   0x6
+#define TIMING_SW_WAIT_150MS                   0x7
+#define TIMING_SW_WAIT_170MS                   0x8
+#define TIMING_SW_WAIT_190MS                   0x9
+#define TIMING_SW_WAIT_210MS                   0xA
+#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT  0
+#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK   (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
+#define TIMING_LONG_KEY_300MS                  0x0
+#define TIMING_LONG_KEY_400MS                  0x1
+#define TIMING_LONG_KEY_500MS                  0x2
+#define TIMING_LONG_KEY_600MS                  0x3
+#define TIMING_LONG_KEY_700MS                  0x4
+#define TIMING_LONG_KEY_800MS                  0x5
+#define TIMING_LONG_KEY_900MS                  0x6
+#define TIMING_LONG_KEY_1000MS                 0x7
+#define TIMING_LONG_KEY_1100MS                 0x8
+#define TIMING_LONG_KEY_1200MS                 0x9
+#define TIMING_LONG_KEY_1300MS                 0xA
+#define TIMING_LONG_KEY_1400MS                 0xB
+#define TIMING_LONG_KEY_1500MS                 0xC
+
+#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT         0
+#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT         1
+#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT             2
+#define SM5502_REG_DEV_TYPE1_UART_SHIFT                        3
+#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT     4
+#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT             5
+#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT       6
+#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT             7
+#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK          (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
+#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_MASK          (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
+#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK              (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
+#define SM5502_REG_DEV_TYPE1_UART_MASK                 (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
+#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK      (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
+#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK              (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
+#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK                (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
+#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK              (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
+
+#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT          0
+#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT         1
+#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT         2
+#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT                3
+#define SM5502_REG_DEV_TYPE2_PPD_SHIFT                 4
+#define SM5502_REG_DEV_TYPE2_TTY_SHIFT                 5
+#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT            6
+#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK           (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
+#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK          (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
+#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK          (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
+#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK         (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
+#define SM5502_REG_DEV_TYPE2_PPD_MASK                  (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
+#define SM5502_REG_DEV_TYPE2_TTY_MASK                  (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
+#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK             (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
+
+#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT     0
+#define SM5502_REG_MANUAL_SW1_DP_SHIFT         2
+#define SM5502_REG_MANUAL_SW1_DM_SHIFT         5
+#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK      (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
+#define SM5502_REG_MANUAL_SW1_DP_MASK          (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
+#define SM5502_REG_MANUAL_SW1_DM_MASK          (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
+#define VBUSIN_SWITCH_OPEN                     0x0
+#define VBUSIN_SWITCH_VBUSOUT                  0x1
+#define VBUSIN_SWITCH_MIC                      0x2
+#define VBUSIN_SWITCH_VBUSOUT_WITH_USB         0x3
+#define DM_DP_CON_SWITCH_OPEN                  0x0
+#define DM_DP_CON_SWITCH_USB                   0x1
+#define DM_DP_CON_SWITCH_AUDIO                 0x2
+#define DM_DP_CON_SWITCH_UART                  0x3
+#define DM_DP_SWITCH_OPEN                      ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
+                                               | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define DM_DP_SWITCH_USB                       ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
+                                               | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define DM_DP_SWITCH_AUDIO                     ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
+                                               | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define DM_DP_SWITCH_UART                      ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
+                                               | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+
+/* SM5502 Interrupts */
+enum sm5502_irq {
+       /* INT1 */
+       SM5502_IRQ_INT1_ATTACH,
+       SM5502_IRQ_INT1_DETACH,
+       SM5502_IRQ_INT1_KP,
+       SM5502_IRQ_INT1_LKP,
+       SM5502_IRQ_INT1_LKR,
+       SM5502_IRQ_INT1_OVP_EVENT,
+       SM5502_IRQ_INT1_OCP_EVENT,
+       SM5502_IRQ_INT1_OVP_OCP_DIS,
+
+       /* INT2 */
+       SM5502_IRQ_INT2_VBUS_DET,
+       SM5502_IRQ_INT2_REV_ACCE,
+       SM5502_IRQ_INT2_ADC_CHG,
+       SM5502_IRQ_INT2_STUCK_KEY,
+       SM5502_IRQ_INT2_STUCK_KEY_RCV,
+       SM5502_IRQ_INT2_MHL,
+
+       SM5502_IRQ_NUM,
+};
+
+#define SM5502_IRQ_INT1_ATTACH_MASK            BIT(0)
+#define SM5502_IRQ_INT1_DETACH_MASK            BIT(1)
+#define SM5502_IRQ_INT1_KP_MASK                        BIT(2)
+#define SM5502_IRQ_INT1_LKP_MASK               BIT(3)
+#define SM5502_IRQ_INT1_LKR_MASK               BIT(4)
+#define SM5502_IRQ_INT1_OVP_EVENT_MASK         BIT(5)
+#define SM5502_IRQ_INT1_OCP_EVENT_MASK         BIT(6)
+#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK       BIT(7)
+#define SM5502_IRQ_INT2_VBUS_DET_MASK          BIT(0)
+#define SM5502_IRQ_INT2_REV_ACCE_MASK          BIT(1)
+#define SM5502_IRQ_INT2_ADC_CHG_MASK           BIT(2)
+#define SM5502_IRQ_INT2_STUCK_KEY_MASK         BIT(3)
+#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK     BIT(4)
+#define SM5502_IRQ_INT2_MHL_MASK               BIT(5)
+
+#endif /*  __LINUX_EXTCON_SM5502_H */
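
The SHIFT/MASK pairs in this header are meant to be used together when decoding a raw register value. A minimal illustrative sketch (not part of the patch) of how a driver might pull the vendor and version fields out of SM5502_REG_DEVICE_ID; the raw value would come from a regmap or I2C read:

    static inline unsigned int sm5502_id_vendor(unsigned int id)
    {
            return (id & SM5502_REG_DEVICE_ID_VENDOR_MASK) >>
                    SM5502_REG_DEVICE_ID_VENDOR_SHIFT;
    }

    static inline unsigned int sm5502_id_version(unsigned int id)
    {
            return (id & SM5502_REG_DEVICE_ID_VERSION_MASK) >>
                    SM5502_REG_DEVICE_ID_VERSION_SHIFT;
    }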
index 12a5c13..4578c72 100644 (file)
@@ -127,6 +127,9 @@ struct arizona_pdata {
        /** Internal pull on GPIO5 is disabled when used for jack detection */
        bool jd_gpio5_nopull;
 
+       /** set to true if jackdet contact opens on insert */
+       bool jd_invert;
+
        /** Use the headphone detect circuit to identify the accessory */
        bool hpdet_acc_id;
 
index 3e050b9..c466ff3 100644 (file)
@@ -262,6 +262,41 @@ enum max77693_irq_source {
        MAX77693_IRQ_GROUP_NR,
 };
 
+#define LED_IRQ_FLED2_OPEN             BIT(0)
+#define LED_IRQ_FLED2_SHORT            BIT(1)
+#define LED_IRQ_FLED1_OPEN             BIT(2)
+#define LED_IRQ_FLED1_SHORT            BIT(3)
+#define LED_IRQ_MAX_FLASH              BIT(4)
+
+#define TOPSYS_IRQ_T120C_INT           BIT(0)
+#define TOPSYS_IRQ_T140C_INT           BIT(1)
+#define TOPSYS_IRQ_LOWSYS_INT          BIT(3)
+
+#define CHG_IRQ_BYP_I                  BIT(0)
+#define CHG_IRQ_THM_I                  BIT(2)
+#define CHG_IRQ_BAT_I                  BIT(3)
+#define CHG_IRQ_CHG_I                  BIT(4)
+#define CHG_IRQ_CHGIN_I                        BIT(6)
+
+#define MUIC_IRQ_INT1_ADC              BIT(0)
+#define MUIC_IRQ_INT1_ADC_LOW          BIT(1)
+#define MUIC_IRQ_INT1_ADC_ERR          BIT(2)
+#define MUIC_IRQ_INT1_ADC1K            BIT(3)
+
+#define MUIC_IRQ_INT2_CHGTYP           BIT(0)
+#define MUIC_IRQ_INT2_CHGDETREUN       BIT(1)
+#define MUIC_IRQ_INT2_DCDTMR           BIT(2)
+#define MUIC_IRQ_INT2_DXOVP            BIT(3)
+#define MUIC_IRQ_INT2_VBVOLT           BIT(4)
+#define MUIC_IRQ_INT2_VIDRM            BIT(5)
+
+#define MUIC_IRQ_INT3_EOC              BIT(0)
+#define MUIC_IRQ_INT3_CGMBC            BIT(1)
+#define MUIC_IRQ_INT3_OVP              BIT(2)
+#define MUIC_IRQ_INT3_MBCCHG_ERR       BIT(3)
+#define MUIC_IRQ_INT3_CHG_ENABLED      BIT(4)
+#define MUIC_IRQ_INT3_BAT_DET          BIT(5)
+
 enum max77693_irq {
        /* PMIC - FLASH */
        MAX77693_LED_IRQ_FLED2_OPEN,
@@ -282,6 +317,10 @@ enum max77693_irq {
        MAX77693_CHG_IRQ_CHG_I,
        MAX77693_CHG_IRQ_CHGIN_I,
 
+       MAX77693_IRQ_NR,
+};
+
+enum max77693_irq_muic {
        /* MUIC INT1 */
        MAX77693_MUIC_IRQ_INT1_ADC,
        MAX77693_MUIC_IRQ_INT1_ADC_LOW,
@@ -304,7 +343,7 @@ enum max77693_irq {
        MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
        MAX77693_MUIC_IRQ_INT3_BAT_DET,
 
-       MAX77693_IRQ_NR,
+       MAX77693_MUIC_IRQ_NR,
 };
 
 struct max77693_dev {
@@ -319,7 +358,10 @@ struct max77693_dev {
        struct regmap *regmap_muic;
        struct regmap *regmap_haptic;
 
-       struct irq_domain *irq_domain;
+       struct regmap_irq_chip_data *irq_data_led;
+       struct regmap_irq_chip_data *irq_data_topsys;
+       struct regmap_irq_chip_data *irq_data_charger;
+       struct regmap_irq_chip_data *irq_data_muic;
 
        int irq;
        int irq_gpio;
@@ -332,14 +374,6 @@ enum max77693_types {
        TYPE_MAX77693,
 };
 
-extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
-extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
-                               u8 *buf);
-extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
-extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
-                               u8 *buf);
-extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
-
 extern int max77693_irq_init(struct max77693_dev *max77686);
 extern void max77693_irq_exit(struct max77693_dev *max77686);
 extern int max77693_irq_resume(struct max77693_dev *max77686);
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
new file mode 100644 (file)
index 0000000..d5b5f76
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Bus driver.
+ *
+ * This implementation is very similar to the virtio bus driver
+ * implementation @ include/linux/virtio.h.
+ */
+#ifndef _MIC_BUS_H_
+#define _MIC_BUS_H_
+/*
+ * Everything a mbus driver needs to work with any particular mbus
+ * implementation.
+ */
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+struct mbus_device_id {
+       __u32 device;
+       __u32 vendor;
+};
+
+#define MBUS_DEV_DMA_HOST 2
+#define MBUS_DEV_DMA_MIC 3
+#define MBUS_DEV_ANY_ID 0xffffffff
+
+/**
+ * mbus_device - representation of a device using mbus
+ * @mmio_va: virtual address of mmio space
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device.
+ * @index: unique position on the mbus bus
+ */
+struct mbus_device {
+       void __iomem *mmio_va;
+       struct mbus_hw_ops *hw_ops;
+       struct mbus_device_id id;
+       struct device dev;
+       int index;
+};
+
+/**
+ * mbus_driver - operations for a mbus I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found.  Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct mbus_driver {
+       struct device_driver driver;
+       const struct mbus_device_id *id_table;
+       int (*probe)(struct mbus_device *dev);
+       void (*scan)(struct mbus_device *dev);
+       void (*remove)(struct mbus_device *dev);
+};
+
+/**
+ * struct mic_irq - opaque pointer used as cookie
+ */
+struct mic_irq;
+
+/**
+ * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
+ */
+struct mbus_hw_ops {
+       struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
+                                               irq_handler_t handler,
+                                               irq_handler_t thread_fn,
+                                               const char *name, void *data,
+                                               int intr_src);
+       void (*free_irq)(struct mbus_device *mbdev,
+                        struct mic_irq *cookie, void *data);
+       void (*ack_interrupt)(struct mbus_device *mbdev, int num);
+};
+
+struct mbus_device *
+mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+                    struct mbus_hw_ops *hw_ops, void __iomem *mmio_va);
+void mbus_unregister_device(struct mbus_device *mbdev);
+
+int mbus_register_driver(struct mbus_driver *drv);
+void mbus_unregister_driver(struct mbus_driver *drv);
+
+static inline struct mbus_device *dev_to_mbus(struct device *_dev)
+{
+       return container_of(_dev, struct mbus_device, dev);
+}
+
+static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
+{
+       return container_of(drv, struct mbus_driver, driver);
+}
+
+#endif /* _MIC_BUS_H */
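
For reference, a minimal sketch (not taken from the MIC code) of how a client driver would use the interface declared above: fill in an id_table, embed the device_driver, and register with mbus_register_driver(). The names, match values, and probe body are placeholders.

    /* Needs <linux/module.h> and <linux/mic_bus.h>. */
    static const struct mbus_device_id my_mbus_ids[] = {
            { MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID },  /* placeholder match */
            { 0 },
    };

    static int my_mbus_probe(struct mbus_device *mbdev)
    {
            /* mbdev->mmio_va and mbdev->hw_ops are ready to use here. */
            dev_info(&mbdev->dev, "mbus device %d bound\n", mbdev->index);
            return 0;
    }

    static void my_mbus_remove(struct mbus_device *mbdev)
    {
            dev_info(&mbdev->dev, "mbus device %d removed\n", mbdev->index);
    }

    static struct mbus_driver my_mbus_driver = {
            .driver = {
                    .name   = "my_mbus_client",
                    .owner  = THIS_MODULE,
            },
            .id_table = my_mbus_ids,
            .probe    = my_mbus_probe,
            .remove   = my_mbus_remove,
    };

    static int __init my_mbus_client_init(void)
    {
            return mbus_register_driver(&my_mbus_driver);
    }
    module_init(my_mbus_client_init);

    static void __exit my_mbus_client_exit(void)
    {
            mbus_unregister_driver(&my_mbus_driver);
    }
    module_exit(my_mbus_client_exit);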
index 6ed3647..61978a4 100644 (file)
@@ -1477,8 +1477,9 @@ enum pci_fixup_pass {
        pci_fixup_final,        /* Final phase of device fixups */
        pci_fixup_enable,       /* pci_enable_device() time */
        pci_fixup_resume,       /* pci_device_resume() */
-       pci_fixup_suspend,      /* pci_device_suspend */
+       pci_fixup_suspend,      /* pci_device_suspend() */
        pci_fixup_resume_early, /* pci_device_resume_early() */
+       pci_fixup_suspend_late, /* pci_device_suspend_late() */
 };
 
 /* Anonymous variables would be nice... */
@@ -1519,6 +1520,11 @@ enum pci_fixup_pass {
        DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,                   \
                suspend##hook, vendor, device, class,   \
                class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,    \
+                                        class_shift, hook)             \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,              \
+               suspend_late##hook, vendor, device,     \
+               class, class_shift, hook)
 
 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)                  \
        DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,                     \
@@ -1544,6 +1550,10 @@ enum pci_fixup_pass {
        DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,                   \
                suspend##hook, vendor, device,          \
                PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)           \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,              \
+               suspend_late##hook, vendor, device,     \
+               PCI_ANY_ID, 0, hook)
 
 #ifdef CONFIG_PCI_QUIRKS
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
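
A minimal sketch (hypothetical device ID, not from this patch) of how a quirk would hook the new pass: define a fixup taking a struct pci_dev and declare it with DECLARE_PCI_FIXUP_SUSPEND_LATE() so it runs during the late suspend phase of the device.

    /* Needs <linux/pci.h>; 0x1234 is a placeholder device ID. */
    static void quirk_example_suspend_late(struct pci_dev *dev)
    {
            dev_info(&dev->dev, "applying suspend-late workaround\n");
            /* device-specific register pokes would go here */
    }
    DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1234,
                                   quirk_example_suspend_late);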
index 795e957..4fc065f 100644 (file)
@@ -328,6 +328,7 @@ enum genwqe_card_state {
        GENWQE_CARD_UNUSED = 0,
        GENWQE_CARD_USED = 1,
        GENWQE_CARD_FATAL_ERROR = 2,
+       GENWQE_CARD_RELOAD_BITSTREAM = 3,
        GENWQE_CARD_STATE_MAX,
 };
 
index 1c45ba5..133d02f 100644 (file)
@@ -34,7 +34,8 @@
 #define I8K_FAN_OFF            0
 #define I8K_FAN_LOW            1
 #define I8K_FAN_HIGH           2
-#define I8K_FAN_MAX            I8K_FAN_HIGH
+#define I8K_FAN_TURBO          3
+#define I8K_FAN_MAX            I8K_FAN_TURBO
 
 #define I8K_VOL_UP             1
 #define I8K_VOL_DOWN           2
index fba1c75..8f96b3e 100644 (file)
@@ -88,7 +88,8 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
                }
        }
 
-       target_fd = open(target_fname, O_RDWR | O_CREAT | O_CLOEXEC, 0744);
+       target_fd = open(target_fname,
+                        O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0744);
        if (target_fd == -1) {
                syslog(LOG_INFO, "Open Failed: %s", strerror(errno));
                goto done;
index e66e710..4c2aa35 100644 (file)
@@ -12,6 +12,9 @@ TARGETS += powerpc
 TARGETS += user
 TARGETS += sysctl
 
+TARGETS_HOTPLUG = cpu-hotplug
+TARGETS_HOTPLUG += memory-hotplug
+
 all:
        for TARGET in $(TARGETS); do \
                make -C $$TARGET; \
@@ -22,6 +25,21 @@ run_tests: all
                make -C $$TARGET run_tests; \
        done;
 
+hotplug:
+       for TARGET in $(TARGETS_HOTPLUG); do \
+               make -C $$TARGET; \
+       done;
+
+run_hotplug: hotplug
+       for TARGET in $(TARGETS_HOTPLUG); do \
+               make -C $$TARGET run_full_test; \
+       done;
+
+clean_hotplug:
+       for TARGET in $(TARGETS_HOTPLUG); do \
+               make -C $$TARGET clean; \
+       done;
+
 clean:
        for TARGET in $(TARGETS); do \
                make -C $$TARGET clean; \
index 5e2faf9..2660d5f 100644 (file)
@@ -4,8 +4,15 @@ The kernel contains a set of "self tests" under the tools/testing/selftests/
 directory. These are intended to be small unit tests to exercise individual
 code paths in the kernel.
 
-Running the selftests
-=====================
+On some systems, hot-plug tests could hang forever waiting for cpu and
+memory to become ready to be offlined. A special hot-plug target is
+provided to run the full range of hot-plug tests. By default, hot-plug
+tests run in safe mode with a limited scope. In limited mode, the
+cpu-hotplug test is run on a single cpu as opposed to all hotplug-capable
+cpus, and the memory-hotplug test is run on 2% of hotplug-capable memory
+instead of 10%.
+
+Running the selftests (hotplug tests are run in limited mode)
+=============================================================
 
 To build the tests:
 
@@ -18,14 +25,26 @@ To run the tests:
 
 - note that some tests will require root privileges.
 
-
-To run only tests targetted for a single subsystem:
+To run only the tests targeted at a single subsystem (this includes the
+hotplug targets, in limited mode):
 
   $  make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
 
 See the top-level tools/testing/selftests/Makefile for the list of all possible
 targets.
 
+Running the full range of hotplug selftests
+===========================================
+
+To build the tests:
+
+  $ make -C tools/testing/selftests hotplug
+
+To run the tests:
+
+  $ make -C tools/testing/selftests run_hotplug
+
+- note that some tests will require root privileges.
 
 Contributing new tests
 ======================
index 790c23a..e9c28d8 100644 (file)
@@ -3,4 +3,7 @@ all:
 run_tests:
        @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
 
+run_full_test:
+       @/bin/bash ./on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
+
 clean:
index bdde7cf..98b1d65 100644 (file)
@@ -11,6 +11,8 @@ prerequisite()
                exit 0
        fi
 
+       taskset -p 01 $$
+
        SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
 
        if [ ! -d "$SYSFS" ]; then
@@ -22,6 +24,19 @@ prerequisite()
                echo $msg cpu hotplug is not supported >&2
                exit 0
        fi
+
+       echo "CPU online/offline summary:"
+       online_cpus=`cat $SYSFS/devices/system/cpu/online`
+       online_max=${online_cpus##*-}
+       echo -e "\t Cpus in online state: $online_cpus"
+
+       offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
+       if [[ "a$offline_cpus" = "a" ]]; then
+               offline_cpus=0
+       else
+               offline_max=${offline_cpus##*-}
+       fi
+       echo -e "\t Cpus in offline state: $offline_cpus"
 }
 
 #
@@ -113,15 +128,25 @@ offline_cpu_expect_fail()
 }
 
 error=-12
+allcpus=0
 priority=0
+online_cpus=0
+online_max=0
+offline_cpus=0
+offline_max=0
 
-while getopts e:hp: opt; do
+while getopts e:ahp: opt; do
        case $opt in
        e)
                error=$OPTARG
                ;;
+       a)
+               allcpus=1
+               ;;
        h)
-               echo "Usage $0 [ -e errno ] [ -p notifier-priority ]"
+               echo "Usage $0 [ -a ] [ -e errno ] [ -p notifier-priority ]"
+               echo -e "\t default offline one cpu"
+               echo -e "\t run with -a option to offline all cpus"
                exit
                ;;
        p)
@@ -137,6 +162,29 @@ fi
 
 prerequisite
 
+#
+# Safe test (default) - offline and online one cpu
+#
+if [ $allcpus -eq 0 ]; then
+       echo "Limited scope test: one hotplug cpu"
+       echo -e "\t (leaves cpu in the original state):"
+       echo -e "\t online to offline to online: cpu $online_max"
+       offline_cpu_expect_success $online_max
+       online_cpu_expect_success $online_max
+
+       if [[ $offline_cpus -gt 0 ]]; then
+               echo -e "\t offline to online to offline: cpu $offline_max"
+               online_cpu_expect_success $offline_max
+               offline_cpu_expect_success $offline_max
+       fi
+       exit 0
+else
+       echo "Full scope test: all hotplug cpus"
+       echo -e "\t online all offline cpus"
+       echo -e "\t offline all online cpus"
+       echo -e "\t online all offline cpus"
+fi
+
 #
 # Online all hot-pluggable CPUs
 #
index fa4f1b3..dbba408 100644 (file)
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
                /* Compare with self */
                ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
                if (ret) {
-                       printf("FAIL: 0 expected but %li returned (%s)\n",
+                       printf("FAIL: 0 expected but %d returned (%s)\n",
                                ret, strerror(errno));
                        ret = -1;
                } else
index 058c76f..d46b8d4 100644 (file)
@@ -1,6 +1,9 @@
 all:
 
 run_tests:
+       @/bin/bash ./on-off-test.sh -r 2 || echo "memory-hotplug selftests: [FAIL]"
+
+run_full_test:
        @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
 
 clean:
index a2816f6..6cddde0 100644 (file)
@@ -142,10 +142,16 @@ fi
 
 prerequisite
 
+echo "Test scope: $ratio% hotplug memory"
+echo -e "\t online all hotplug memory in offline state"
+echo -e "\t offline $ratio% hotplug memory in online state"
+echo -e "\t online all hotplug memory in offline state"
+
 #
 # Online all hot-pluggable memory
 #
 for memory in `hotplaggable_offline_memory`; do
+       echo offline-online $memory
        online_memory_expect_success $memory
 done
 
@@ -154,6 +160,7 @@ done
 #
 for memory in `hotpluggable_online_memory`; do
        if [ $((RANDOM % 100)) -lt $ratio ]; then
+               echo online-offline $memory
                offline_memory_expect_success $memory
        fi
 done
@@ -162,6 +169,7 @@ done
 # Online all hot-pluggable memory again
 #
 for memory in `hotplaggable_offline_memory`; do
+       echo offline-online $memory
        online_memory_expect_success $memory
 done
 
index 218a122..8056e2e 100644 (file)
@@ -1,6 +1,6 @@
 all:
-       gcc -O2 -lrt mq_open_tests.c -o mq_open_tests
-       gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
+       gcc -O2 mq_open_tests.c -o mq_open_tests -lrt
+       gcc -O2 -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
 
 run_tests:
        @./mq_open_tests /test1 || echo "mq_open_tests: [FAIL]"
index 711cc29..9c1a5d3 100644 (file)
@@ -80,7 +80,8 @@ void shutdown(int exit_val, char *err_cause, int line_no)
        if (in_shutdown++)
                return;
 
-       seteuid(0);
+       if (seteuid(0) == -1)
+               perror("seteuid() failed");
 
        if (queue != -1)
                if (mq_close(queue))
@@ -292,8 +293,10 @@ int main(int argc, char *argv[])
        /* Tell the user our initial state */
        printf("\nInitial system state:\n");
        printf("\tUsing queue path:\t\t%s\n", queue_path);
-       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
-       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n",
+               (long) saved_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n",
+               (long) saved_limits.rlim_max);
        printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
        printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
        if (default_settings) {
@@ -308,8 +311,8 @@ int main(int argc, char *argv[])
        validate_current_settings();
 
        printf("Adjusted system state for testing:\n");
-       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
-       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n", (long) cur_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n", (long) cur_limits.rlim_max);
        printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
        printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
        if (default_settings) {
@@ -454,7 +457,12 @@ int main(int argc, char *argv[])
        else
                printf("Queue open with total size > 2GB when euid = 0 "
                       "failed:\t\t\tPASS\n");
-       seteuid(99);
+
+       if (seteuid(99) == -1) {
+               perror("seteuid() failed");
+               exit(1);
+       }
+
        attr.mq_maxmsg = cur_max_msgs;
        attr.mq_msgsize = cur_max_msgsize;
        if (test_queue_fail(&attr, &result))
index 2fadd4b..94dae65 100644 (file)
@@ -296,9 +296,9 @@ static inline void open_queue(struct mq_attr *attr)
        printf("\n\tQueue %s created:\n", queue_path);
        printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
               "O_NONBLOCK" : "(null)");
-       printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg);
-       printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize);
-       printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs);
+       printf("\t\tmq_maxmsg:\t\t\t%lu\n", result.mq_maxmsg);
+       printf("\t\tmq_msgsize:\t\t\t%lu\n", result.mq_msgsize);
+       printf("\t\tmq_curmsgs:\t\t\t%lu\n", result.mq_curmsgs);
 }
 
 void *fake_cont_thread(void *arg)
@@ -440,7 +440,7 @@ void *perf_test_thread(void *arg)
                shutdown(2, "clock_getres()", __LINE__);
 
        printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
-       printf("\t\tClock resolution:\t\t%d nsec%s\n", res.tv_nsec,
+       printf("\t\tClock resolution:\t\t%lu nsec%s\n", res.tv_nsec,
               res.tv_nsec > 1 ? "s" : "");
 
 
@@ -454,20 +454,20 @@ void *perf_test_thread(void *arg)
        recv_total.tv_nsec = 0;
        for (i = 0; i < TEST1_LOOPS; i++)
                do_send_recv();
-       printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+       printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
               send_total.tv_sec, send_total.tv_nsec);
        nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
                 send_total.tv_nsec) / TEST1_LOOPS;
-       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
-       printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+       printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
+       printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
               recv_total.tv_sec, recv_total.tv_nsec);
        nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
                recv_total.tv_nsec) / TEST1_LOOPS;
-       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+       printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
 
 
        for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
-               printf(cur_test->desc);
+               printf("%s:\n", cur_test->desc);
                printf("\t\t(%d iterations)\n", TEST2_LOOPS);
                prio_out = 0;
                send_total.tv_sec = 0;
@@ -493,16 +493,16 @@ void *perf_test_thread(void *arg)
                        cur_test->func(&prio_out);
                }
                printf("done.\n");
-               printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+               printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
                       send_total.tv_sec, send_total.tv_nsec);
                nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
                         send_total.tv_nsec) / TEST2_LOOPS;
-               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
-               printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+               printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
+               printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
                       recv_total.tv_sec, recv_total.tv_nsec);
                nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
                        recv_total.tv_nsec) / TEST2_LOOPS;
-               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+               printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
                printf("\t\tDraining queue...");
                fflush(stdout);
                clock_gettime(clock, &start);
@@ -653,8 +653,10 @@ int main(int argc, char *argv[])
        /* Tell the user our initial state */
        printf("\nInitial system state:\n");
        printf("\tUsing queue path:\t\t\t%s\n", queue_path);
-       printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", saved_limits.rlim_cur);
-       printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", saved_limits.rlim_max);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
+               (long) saved_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
+               (long) saved_limits.rlim_max);
        printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
        printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
        printf("\tNice value:\t\t\t\t%d\n", cur_nice);
@@ -667,10 +669,10 @@ int main(int argc, char *argv[])
                printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
                printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
        } else {
-               printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n",
-                      cur_limits.rlim_cur);
-               printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n",
-                      cur_limits.rlim_max);
+               printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
+                      (long) cur_limits.rlim_cur);
+               printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
+                      (long) cur_limits.rlim_max);
        }
        printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
        printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);