Merge greybus driver tree into 4.8-rc6
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Sep 2016 10:29:33 +0000 (12:29 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Sep 2016 10:29:33 +0000 (12:29 +0200)
This pulls the external greybus driver tree into 4.8-rc6 as it should be
part of the main kernel tree and not live outside in some lonely github
repo, never to be reunited with its true love...

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
148 files changed:
drivers/staging/greybus/.gitignore [new file with mode: 0644]
drivers/staging/greybus/Documentation/es1_ap_desc.c [new file with mode: 0644]
drivers/staging/greybus/Documentation/firmware/authenticate.c [new file with mode: 0644]
drivers/staging/greybus/Documentation/firmware/firmware-management [new file with mode: 0644]
drivers/staging/greybus/Documentation/firmware/firmware.c [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs-bus-greybus [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_class [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/state [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_class [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/state [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/product_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/vendor_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_manufacturer_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/interface_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/serial_number [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/vendor_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/eject [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/module_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/num_interfaces [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_class [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/gpio/gpiochip490/.gitignore [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/i2c-4/.gitignore [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/state [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/product_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/vendor_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_manufacturer_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/interface_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/serial_number [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/vendor_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.6/interface_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/eject [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/module_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/num_interfaces [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/ap_intf_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/endo_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/intf_eject [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus1/bus_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_class [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/state [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/product_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/vendor_string [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_manufacturer_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/interface_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/product_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/serial_number [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/vendor_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/eject [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/module_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/num_interfaces [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/ap_intf_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/endo_id [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/intf_eject [new file with mode: 0644]
drivers/staging/greybus/Documentation/sysfs/greybus2/bus_id [new file with mode: 0644]
drivers/staging/greybus/LICENSE [new file with mode: 0644]
drivers/staging/greybus/Makefile [new file with mode: 0644]
drivers/staging/greybus/README [new file with mode: 0644]
drivers/staging/greybus/arche-apb-ctrl.c [new file with mode: 0644]
drivers/staging/greybus/arche-platform.c [new file with mode: 0644]
drivers/staging/greybus/arche_platform.h [new file with mode: 0644]
drivers/staging/greybus/arpc.h [new file with mode: 0644]
drivers/staging/greybus/audio_apbridgea.c [new file with mode: 0644]
drivers/staging/greybus/audio_apbridgea.h [new file with mode: 0644]
drivers/staging/greybus/audio_codec.c [new file with mode: 0644]
drivers/staging/greybus/audio_codec.h [new file with mode: 0644]
drivers/staging/greybus/audio_gb.c [new file with mode: 0644]
drivers/staging/greybus/audio_manager.c [new file with mode: 0644]
drivers/staging/greybus/audio_manager.h [new file with mode: 0644]
drivers/staging/greybus/audio_manager_module.c [new file with mode: 0644]
drivers/staging/greybus/audio_manager_private.h [new file with mode: 0644]
drivers/staging/greybus/audio_manager_sysfs.c [new file with mode: 0644]
drivers/staging/greybus/audio_module.c [new file with mode: 0644]
drivers/staging/greybus/audio_topology.c [new file with mode: 0644]
drivers/staging/greybus/authentication.c [new file with mode: 0644]
drivers/staging/greybus/bootrom.c [new file with mode: 0644]
drivers/staging/greybus/bundle.c [new file with mode: 0644]
drivers/staging/greybus/bundle.h [new file with mode: 0644]
drivers/staging/greybus/camera.c [new file with mode: 0644]
drivers/staging/greybus/connection.c [new file with mode: 0644]
drivers/staging/greybus/connection.h [new file with mode: 0644]
drivers/staging/greybus/control.c [new file with mode: 0644]
drivers/staging/greybus/control.h [new file with mode: 0644]
drivers/staging/greybus/core.c [new file with mode: 0644]
drivers/staging/greybus/debugfs.c [new file with mode: 0644]
drivers/staging/greybus/devices [new file with mode: 0644]
drivers/staging/greybus/es2.c [new file with mode: 0644]
drivers/staging/greybus/firmware.h [new file with mode: 0644]
drivers/staging/greybus/fw-core.c [new file with mode: 0644]
drivers/staging/greybus/fw-download.c [new file with mode: 0644]
drivers/staging/greybus/fw-management.c [new file with mode: 0644]
drivers/staging/greybus/gb-camera.h [new file with mode: 0644]
drivers/staging/greybus/gbphy.c [new file with mode: 0644]
drivers/staging/greybus/gbphy.h [new file with mode: 0644]
drivers/staging/greybus/gpio.c [new file with mode: 0644]
drivers/staging/greybus/greybus.h [new file with mode: 0644]
drivers/staging/greybus/greybus_authentication.h [new file with mode: 0644]
drivers/staging/greybus/greybus_firmware.h [new file with mode: 0644]
drivers/staging/greybus/greybus_id.h [new file with mode: 0644]
drivers/staging/greybus/greybus_manifest.h [new file with mode: 0644]
drivers/staging/greybus/greybus_protocols.h [new file with mode: 0644]
drivers/staging/greybus/greybus_trace.h [new file with mode: 0644]
drivers/staging/greybus/hd.c [new file with mode: 0644]
drivers/staging/greybus/hd.h [new file with mode: 0644]
drivers/staging/greybus/hid.c [new file with mode: 0644]
drivers/staging/greybus/i2c.c [new file with mode: 0644]
drivers/staging/greybus/interface.c [new file with mode: 0644]
drivers/staging/greybus/interface.h [new file with mode: 0644]
drivers/staging/greybus/kernel_ver.h [new file with mode: 0644]
drivers/staging/greybus/light.c [new file with mode: 0644]
drivers/staging/greybus/log.c [new file with mode: 0644]
drivers/staging/greybus/loopback.c [new file with mode: 0644]
drivers/staging/greybus/manifest.c [new file with mode: 0644]
drivers/staging/greybus/manifest.h [new file with mode: 0644]
drivers/staging/greybus/module.c [new file with mode: 0644]
drivers/staging/greybus/module.h [new file with mode: 0644]
drivers/staging/greybus/operation.c [new file with mode: 0644]
drivers/staging/greybus/operation.h [new file with mode: 0644]
drivers/staging/greybus/power_supply.c [new file with mode: 0644]
drivers/staging/greybus/pwm.c [new file with mode: 0644]
drivers/staging/greybus/raw.c [new file with mode: 0644]
drivers/staging/greybus/scripts/checkpatch.pl [new file with mode: 0755]
drivers/staging/greybus/scripts/spelling.txt [new file with mode: 0644]
drivers/staging/greybus/sdio.c [new file with mode: 0644]
drivers/staging/greybus/spi.c [new file with mode: 0644]
drivers/staging/greybus/spilib.c [new file with mode: 0644]
drivers/staging/greybus/spilib.h [new file with mode: 0644]
drivers/staging/greybus/svc.c [new file with mode: 0644]
drivers/staging/greybus/svc.h [new file with mode: 0644]
drivers/staging/greybus/svc_watchdog.c [new file with mode: 0644]
drivers/staging/greybus/timesync.c [new file with mode: 0644]
drivers/staging/greybus/timesync.h [new file with mode: 0644]
drivers/staging/greybus/timesync_platform.c [new file with mode: 0644]
drivers/staging/greybus/tools/Android.mk [new file with mode: 0644]
drivers/staging/greybus/tools/Makefile [new file with mode: 0644]
drivers/staging/greybus/tools/README.loopback [new file with mode: 0644]
drivers/staging/greybus/tools/lbtest [new file with mode: 0755]
drivers/staging/greybus/tools/loopback_test.c [new file with mode: 0644]
drivers/staging/greybus/uart.c [new file with mode: 0644]
drivers/staging/greybus/usb.c [new file with mode: 0644]
drivers/staging/greybus/vibrator.c [new file with mode: 0644]

diff --git a/drivers/staging/greybus/.gitignore b/drivers/staging/greybus/.gitignore
new file mode 100644 (file)
index 0000000..faf45ee
--- /dev/null
@@ -0,0 +1,15 @@
+.*
+*.cmd
+*.ko
+*.mod.c
+modules.order
+Module.symvers
+*.o
+*.o.*
+*.swp
+.tmp_versions
+tags
+cscope.*
+ncscope.*
+*.patch
+tools/loopback_test
diff --git a/drivers/staging/greybus/Documentation/es1_ap_desc.c b/drivers/staging/greybus/Documentation/es1_ap_desc.c
new file mode 100644 (file)
index 0000000..1502089
--- /dev/null
@@ -0,0 +1,70 @@
+/* ES1 AP Bridge Chip USB descriptor definitions */
+
+static const u8 es1_dev_descriptor[] = {
+       0x12,           /* __u8   bLength */
+       0x01,           /* __u8   bDescriptorType; Device */
+       0x00, 0x02,     /* __le16 bcdUSB v2.0 */
+       0x00,           /* __u8   bDeviceClass */
+       0x00,           /* __u8   bDeviceSubClass; */
+       0x00,           /* __u8   bDeviceProtocol; */
+       0x40,           /* __u8   bMaxPacketSize0; 64 bytes */
+
+       0xff, 0xff,     /* __le16 idVendor; 0xffff  made up for now */
+       0x01, 0x00,     /* __le16 idProduct; 0x0001  made up for now */
+       0x01, 0x00,     /* __le16 bcdDevice; ES1 */
+
+       0x03,           /* __u8  iManufacturer; */
+       0x02,           /* __u8  iProduct; */
+       0x01,           /* __u8  iSerialNumber; */
+       0x01            /* __u8  bNumConfigurations; */
+};
+
+static const u8 es1_config_descriptor[] = {
+       /* one configuration */
+       0x09,           /*  __u8   bLength; */
+       0x02,           /*  __u8   bDescriptorType; Configuration */
+       0x27, 0x00,     /*  __le16 wTotalLength; */
+       0x01,           /*  __u8   bNumInterfaces; (1) */
+       0x01,           /*  __u8   bConfigurationValue; */
+       0x00,           /*  __u8   iConfiguration; */
+       0xc0,           /*  __u8   bmAttributes;
+                                Bit 7: must be set,
+                                    6: Self-powered,
+                                    5: Remote wakeup,
+                                    4..0: resvd */
+       0x00,           /*  __u8  MaxPower; */
+
+       /* one interface */
+       0x09,           /*  __u8  if_bLength; */
+       0x04,           /*  __u8  if_bDescriptorType; Interface */
+       0x00,           /*  __u8  if_bInterfaceNumber; */
+       0x00,           /*  __u8  if_bAlternateSetting; */
+       0x03,           /*  __u8  if_bNumEndpoints; */
+       0xff,           /*  __u8  if_bInterfaceClass; Vendor-specific */
+       0xff,           /*  __u8  if_bInterfaceSubClass; Vendor-specific */
+       0xff,           /*  __u8  if_bInterfaceProtocol; Vendor-specific */
+       0x00,           /*  __u8  if_iInterface; */
+
+       /* three endpoints */
+       0x07,           /*  __u8   ep_bLength; */
+       0x05,           /*  __u8   ep_bDescriptorType; Endpoint */
+       0x81,           /*  __u8   ep_bEndpointAddress; IN Endpoint 1 */
+       0x03,           /*  __u8   ep_bmAttributes; Interrupt */
+       0x00, 0x04,     /*  __le16 ep_wMaxPacketSize; 1024 */
+       0x40,           /*  __u8   ep_bInterval; 64ms */
+
+       0x07,           /*  __u8   ep_bLength; */
+       0x05,           /*  __u8   ep_bDescriptorType; Endpoint */
+       0x82,           /*  __u8   ep_bEndpointAddress; IN Endpoint 2 */
+       0x02,           /*  __u8   ep_bmAttributes; Bulk */
+       0x00, 0x04,     /*  __le16 ep_wMaxPacketSize; 1024 */
+       0x40,           /*  __u8   ep_bInterval; */
+
+       0x07,           /*  __u8   ep_bLength; */
+       0x05,           /*  __u8   ep_bDescriptorType; Endpoint */
+       0x02,           /*  __u8   ep_bEndpointAddress; Out Endpoint 2 */
+       0x02,           /*  __u8   ep_bmAttributes; Bulk */
+       0x00, 0x04,     /*  __le16 ep_wMaxPacketSize; 1024 */
+       0x40            /*  __u8   ep_bInterval; */
+};
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
new file mode 100644 (file)
index 0000000..ab0688a
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Sample code to test CAP protocol
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_authentication.h"
+
+struct cap_ioc_get_endpoint_uid uid;
+struct cap_ioc_get_ims_certificate cert = {
+       .certificate_class = 0,
+       .certificate_id = 0,
+};
+
+struct cap_ioc_authenticate authenticate = {
+       .auth_type = 0,
+       .challenge = {0},
+};
+
+int main(int argc, char *argv[])
+{
+       unsigned int timeout = 10000;
+       char *capdev;
+       int fd, ret;
+
+       /* Make sure arguments are correct */
+       if (argc != 2) {
+               printf("\nUsage: ./firmware <Path of the gb-cap-X dev>\n");
+               return 0;
+       }
+
+       capdev = argv[1];
+
+       printf("Opening %s authentication device\n", capdev);
+
+       fd = open(capdev, O_RDWR);
+       if (fd < 0) {
+               printf("Failed to open: %s\n", capdev);
+               return -1;
+       }
+
+       /* Get UID */
+       printf("Get UID\n");
+
+       ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
+       if (ret < 0) {
+               printf("Failed to get UID: %s (%d)\n", capdev, ret);
+               ret = -1;
+               goto close_fd;
+       }
+
+       printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));
+
+       /* Get certificate */
+       printf("Get IMS certificate\n");
+
+       ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
+       if (ret < 0) {
+               printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
+               ret = -1;
+               goto close_fd;
+       }
+
+       printf("IMS Certificate size: %d\n", cert.cert_size);
+
+       /* Authenticate */
+       printf("Authenticate module\n");
+
+       memcpy(authenticate.uid, uid.uid, 8);
+
+       ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
+       if (ret < 0) {
+               printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
+               ret = -1;
+               goto close_fd;
+       }
+
+       printf("Authenticated, result (%02x), sig-size (%02x)\n",
+               authenticate.result_code, authenticate.signature_size);
+
+close_fd:
+       close(fd);
+
+       return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware-management b/drivers/staging/greybus/Documentation/firmware/firmware-management
new file mode 100644 (file)
index 0000000..7918257
--- /dev/null
@@ -0,0 +1,333 @@
+
+Firmware Management
+-------------------
+ Copyright 2016 Google Inc.
+ Copyright 2016 Linaro Ltd.
+
+Interface-Manifest
+------------------
+
+All firmware packages on the Modules or Interfaces are managed by a special
+Firmware Management Protocol. To support Firmware Management by the AP, the
+Interface Manifest shall at least contain the Firmware Management Bundle and a
+Firmware Management Protocol CPort within it.
+
+The bundle may contain additional CPorts based on the extra functionality
+required to manage firmware packages.
+
+For example, this is how the Firmware Management part of the Interface Manifest
+might look:
+
+       ; Firmware Management Bundle (Bundle 1):
+       [bundle-descriptor 1]
+       class = 0x16
+
+       ; (Mandatory) Firmware Management Protocol on CPort 1
+       [cport-descriptor 1]
+       bundle = 1
+       protocol = 0x18
+
+       ; (Optional) Firmware Download Protocol on CPort 2
+       [cport-descriptor 2]
+       bundle = 1
+       protocol = 0x17
+
+       ; (Optional) SPI protocol on CPort 3
+       [cport-descriptor 3]
+       bundle = 1
+       protocol = 0x0b
+
+       ; (Optional) Component Authentication Protocol (CAP) on CPort 4
+       [cport-descriptor 4]
+       bundle = 1
+       protocol = 0x19
+
+
+Sysfs Interfaces - Firmware Management
+--------------------------------------
+
+The Firmware Management Protocol interacts with userspace using the character
+device interface. The character device will be present in the /dev/ directory
+and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-fw-mgmt-N, and the user first needs to identify the character device used
+for firmware-management of a particular interface.
+
+The Firmware Management core creates a device of class 'gb_fw_mgmt', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0
+
+The last component of this path, gb-fw-mgmt-0, is precisely the name of the
+char device, and so the device node in this case will be:
+
+/dev/gb-fw-mgmt-0
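+
+As an illustration only, a small helper along the lines of the sketch below
+could scan such a class directory to discover the node name at runtime rather
+than hard-coding it. The helper name and the bundle path are hypothetical and
+not part of the kernel sources:
+
+#include <stdio.h>
+#include <string.h>
+#include <dirent.h>
+
+/* Fill 'dev' with "/dev/gb-fw-mgmt-N" found under bundle_dir/gb_fw_mgmt */
+static int find_fw_mgmt_dev(const char *bundle_dir, char *dev, size_t len)
+{
+       char class_dir[256];
+       struct dirent *ent;
+       DIR *dir;
+
+       snprintf(class_dir, sizeof(class_dir), "%s/gb_fw_mgmt", bundle_dir);
+
+       dir = opendir(class_dir);
+       if (!dir)
+               return -1;
+
+       while ((ent = readdir(dir))) {
+               if (!strncmp(ent->d_name, "gb-fw-mgmt-", 11)) {
+                       snprintf(dev, len, "/dev/%s", ent->d_name);
+                       closedir(dir);
+                       return 0;
+               }
+       }
+
+       closedir(dir);
+       return -1;
+}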
+
+Operations on the Char device
+=============================
+
+The character device (gb-fw-mgmt-0 in the example) can be opened by the
+userspace application, which can then perform various 'ioctl' operations on
+the device. The device doesn't support any read/write operations.
+
+Following are the IOCTLs and their data structures available to the user:
+
+/* IOCTL support */
+#define GB_FW_U_LOAD_METHOD_UNIPRO             0x01
+#define GB_FW_U_LOAD_METHOD_INTERNAL           0x02
+
+#define GB_FW_U_LOAD_STATUS_FAILED             0x00
+#define GB_FW_U_LOAD_STATUS_UNVALIDATED                0x01
+#define GB_FW_U_LOAD_STATUS_VALIDATED          0x02
+#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED  0x03
+
+#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS              0x01
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND            0x02
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH           0x03
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE           0x04
+#define GB_FW_U_BACKEND_FW_STATUS_INT                  0x05
+#define GB_FW_U_BACKEND_FW_STATUS_RETRY                        0x06
+#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED                0x07
+
+#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS         0x01
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE   0x02
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED   0x03
+#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY           0x04
+#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT                0x05
+
+
+struct fw_mgmt_ioc_get_intf_version {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u16 major;
+       __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_get_backend_version {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u16 major;
+       __u16 minor;
+       __u8 status;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+       __u8                    firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u8                    load_method;
+       __u8                    status;
+       __u16                   major;
+       __u16                   minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_backend_fw_update {
+       __u8                    firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u8                    status;
+} __attribute__ ((__packed__));
+
+#define FW_MGMT_IOCTL_BASE                     'S'
+#define FW_MGMT_IOC_GET_INTF_FW                        _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW             _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE     _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE     _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS             _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH                        _IO(FW_MGMT_IOCTL_BASE, 5)
+
+1. FW_MGMT_IOC_GET_INTF_FW:
+
+   This ioctl shall be used by the user to get the version and firmware-tag of
+   the currently running Interface Firmware. All the fields of the 'struct
+   fw_mgmt_ioc_get_intf_version' are filled by the kernel.
+
+2. FW_MGMT_IOC_GET_BACKEND_FW:
+
+   This ioctl shall be used by the user to get the version of a currently
+   running Backend Interface Firmware identified by a firmware-tag. The user is
+   required to fill the 'firmware_tag' field of the 'struct
+   fw_mgmt_ioc_get_backend_version' in this case. The 'major' and 'minor'
+   fields are set by the kernel in response.
+
+3. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+
+   This ioctl shall be used by the user to load an Interface Firmware package on
+   an Interface. The user needs to fill the 'firmware_tag' and 'load_method'
+   fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'. The 'status',
+   'major' and 'minor' fields are set by the kernel in response.
+
+4. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+
+   This ioctl shall be used by the user to request an Interface to update a
+   Backend Interface Firmware.  The user is required to fill the 'firmware_tag'
+   field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The
+   'status' field is set by the kernel in response.
+
+5. FW_MGMT_IOC_SET_TIMEOUT_MS:
+
+   This ioctl shall be used by the user to increase the timeout interval within
+   which the firmware must get loaded by the Module. The default timeout is 1
+   second. The user needs to pass the timeout in milliseconds.
+
+6. FW_MGMT_IOC_MODE_SWITCH:
+
+   This ioctl shall be used by the user to mode-switch the module to the
+   previously loaded interface firmware. If the interface firmware hasn't been
+   loaded previously, or if an unsuccessful FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE
+   operation is performed after loading the interface firmware, then the
+   firmware core won't allow the mode-switch.
+
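+A condensed sketch of the interface-firmware update flow described above
+(steps 5, 3 and 6, error handling trimmed) might look like this; the complete
+version is the firmware.c sample application provided in this directory:
+
+       int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
+       unsigned int timeout = 10000;
+       struct fw_mgmt_ioc_intf_load_and_validate load = {
+               .load_method = GB_FW_U_LOAD_METHOD_UNIPRO,
+       };
+
+       strncpy((char *)load.firmware_tag, "s3f", GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+       ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout);       /* 5. extend timeout */
+       ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load);  /* 3. load and validate */
+       ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);                    /* 6. switch to the new firmware */
+       close(fd);
+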
+
+Sysfs Interfaces - Authentication
+---------------------------------
+
+The Component Authentication Protocol interacts with userspace using the
+character device interface. The character device will be present in the /dev/
+directory and will be named gb-authenticate-<N>. The number <N> is assigned at
+runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-authenticate-N, and the user first needs to identify the character device
+used for authentication of a particular interface.
+
+The Authentication core creates a device of class 'gb_authenticate', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0
+
+The last component of this path, gb-authenticate-0, is precisely the name of
+the char device, and so the device node in this case will be:
+
+/dev/gb-authenticate-0
+
+Operations on the Char device
+=============================
+
+The character device (/dev/gb-authenticate-0 in the above example) can be opened
+by the userspace application, which can then perform various 'ioctl' operations
+on the device. The device doesn't support any read/write operations.
+
+Following are the IOCTLs and their data structures available to the user:
+
+#define CAP_CERTIFICATE_MAX_SIZE       1600
+#define CAP_SIGNATURE_MAX_SIZE         320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC              0x00000001
+#define CAP_CERT_IMS_EASC              0x00000002
+#define CAP_CERT_IMS_EARC              0x00000003
+#define CAP_CERT_IMS_IAPC              0x00000004
+#define CAP_CERT_IMS_IASC              0x00000005
+#define CAP_CERT_IMS_IARC              0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND      0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL        0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT    0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND  0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI               0x00000001
+#define CAP_AUTH_IMS_SEC               0x00000002
+#define CAP_AUTH_IMS_RSA               0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS     0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE    0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP    0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY      0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL    0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+       __u8                    uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+       __u32                   certificate_class;
+       __u32                   certificate_id;
+
+       __u8                    result_code;
+       __u32                   cert_size;
+       __u8                    certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+       __u32                   auth_type;
+       __u8                    uid[8];
+       __u8                    challenge[32];
+
+       __u8                    result_code;
+       __u8                    response[64];
+       __u32                   signature_size;
+       __u8                    signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE                 'C'
+#define CAP_IOC_GET_ENDPOINT_UID       _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE    _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE           _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+
+1. CAP_IOC_GET_ENDPOINT_UID:
+
+   This ioctl shall be used by the user to get the endpoint UID associated with
+   the Interface.  All the fields of the 'struct cap_ioc_get_endpoint_uid' are
+   filled by the kernel.
+
+2. CAP_IOC_GET_IMS_CERTIFICATE:
+
+   This ioctl shall be used by the user to retrieve one of the available
+   cryptographic certificates held by the Interface for use in Component
+   Authentication. The user is required to fill the 'certificate_class' and
+   'certificate_id' fields of the 'struct cap_ioc_get_ims_certificate' in this
+   case. The other fields will be set by the kernel in response. The first
+   'cert_size' bytes of the 'certificate' shall be read by the user and the
+   rest must be discarded.
+
+3. CAP_IOC_AUTHENTICATE:
+
+   This ioctl shall be used by the user to authenticate the Module attached to
+   an Interface.  The user needs to fill the 'auth_type', 'uid', and 'challenge'
+   fields of the 'struct cap_ioc_authenticate'. The other fields will be set by
+   the kernel in response.  The first 'signature_size' bytes of the 'signature'
+   shall be read by the user and the rest must be discarded.
+
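+A condensed sketch of this sequence (error handling trimmed) might look like
+the following; the complete version is the authenticate.c sample application
+provided in this directory:
+
+       struct cap_ioc_get_endpoint_uid uid;
+       struct cap_ioc_authenticate auth = {
+               .auth_type = CAP_AUTH_IMS_PRI,
+       };
+       int fd = open("/dev/gb-authenticate-0", O_RDWR);
+
+       ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);      /* 1. fetch the endpoint UID */
+       memcpy(auth.uid, uid.uid, sizeof(auth.uid));
+       ioctl(fd, CAP_IOC_AUTHENTICATE, &auth);         /* 3. challenge/response */
+       close(fd);
+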
+
+Sysfs Interfaces - Firmware Download
+------------------------------------
+
+The Firmware Download Protocol uses the Linux kernel's existing firmware class,
+and the interfaces provided to userspace are described in:
+Documentation/firmware_class/.
+
+
+Sysfs Interfaces - SPI Flash
+----------------------------
+
+The SPI flash is exposed in userspace as an MTD device and is created
+within the Bundle directory. For example, this is how the path might look:
+
+$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
+mtd0    mtd0ro
+
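+Once the MTD device exists, the standard MTD ioctls from <mtd/mtd-user.h>
+(or the usual mtd-utils tooling) can be used on it. As a purely illustrative
+sketch, the flash geometry could be queried like this:
+
+       #include <stdio.h>
+       #include <fcntl.h>
+       #include <sys/ioctl.h>
+       #include <mtd/mtd-user.h>
+
+       struct mtd_info_user info;
+       int fd = open("/dev/mtd0", O_RDWR);
+
+       ioctl(fd, MEMGETINFO, &info);   /* size, erasesize, writesize, ... */
+       printf("size %u erasesize %u\n", info.size, info.erasesize);
+       close(fd);
+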
+
+Sample Applications
+-------------------
+
+The current directory provides a firmware.c test application, which can be
+referenced while developing a userspace application that talks to the
+firmware-management protocol.
+
+The current directory also provides an authenticate.c test application, which
+can be referenced while developing a userspace application that talks to the
+component authentication protocol.
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
new file mode 100644 (file)
index 0000000..ff93824
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+ * Sample code to test firmware-management protocol
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_firmware.h"
+
+#define FW_DEV_DEFAULT         "/dev/gb-fw-mgmt-0"
+#define FW_TAG_INT_DEFAULT     "s3f"
+#define FW_TAG_BCND_DEFAULT    "bf_01"
+#define FW_UPDATE_TYPE_DEFAULT 0
+#define FW_TIMEOUT_DEFAULT     10000
+
+static const char *firmware_tag;
+static const char *fwdev = FW_DEV_DEFAULT;
+static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
+static int fw_timeout = FW_TIMEOUT_DEFAULT;
+
+static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+static struct fw_mgmt_ioc_backend_fw_update backend_update;
+
+static void usage(void)
+{
+       printf("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n");
+}
+
+static int update_intf_firmware(int fd)
+{
+       int ret;
+
+       /* Get Interface Firmware Version */
+       printf("Get Interface Firmware Version\n");
+
+       ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
+       if (ret < 0) {
+               printf("Failed to get interface firmware version: %s (%d)\n",
+                       fwdev, ret);
+               return -1;
+       }
+
+       printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
+               intf_fw_info.firmware_tag, intf_fw_info.major,
+               intf_fw_info.minor);
+
+       /* Try Interface Firmware load over Unipro */
+       printf("Loading Interface Firmware\n");
+
+       intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
+       intf_load.status = 0;
+       intf_load.major = 0;
+       intf_load.minor = 0;
+
+       strncpy((char *)&intf_load.firmware_tag, firmware_tag,
+               GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+       ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
+       if (ret < 0) {
+               printf("Failed to load interface firmware: %s (%d)\n", fwdev,
+                       ret);
+               return -1;
+       }
+
+       if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
+           intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
+               printf("Load status says loading failed: %d\n",
+                       intf_load.status);
+               return -1;
+       }
+
+       printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
+               firmware_tag, intf_load.major, intf_load.minor,
+               intf_load.status);
+
+       /* Initiate Mode-switch to the newly loaded firmware */
+       printf("Initiate Mode switch\n");
+
+       ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
+       if (ret < 0)
+               printf("Failed to initiate mode-switch (%d)\n", ret);
+
+       return ret;
+}
+
+static int update_backend_firmware(int fd)
+{
+       int ret;
+
+       /* Get Backend Firmware Version */
+       printf("Getting Backend Firmware Version\n");
+
+       strncpy((char *)&backend_fw_info.firmware_tag, firmware_tag,
+               GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_version:
+       ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
+       if (ret < 0) {
+               printf("Failed to get backend firmware version: %s (%d)\n",
+                       fwdev, ret);
+               return -1;
+       }
+
+       printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
+               backend_fw_info.firmware_tag, backend_fw_info.major,
+               backend_fw_info.minor, backend_fw_info.status);
+
+       if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
+               goto retry_fw_version;
+
+       if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS)
+           && (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
+               printf("Failed to get backend firmware version: %s (%d)\n",
+                       fwdev, backend_fw_info.status);
+               return -1;
+       }
+
+       /* Try Backend Firmware Update over Unipro */
+       printf("Updating Backend Firmware\n");
+
+       strncpy((char *)&backend_update.firmware_tag, firmware_tag,
+               GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_update:
+       backend_update.status = 0;
+
+       ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
+       if (ret < 0) {
+               printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
+               return -1;
+       }
+
+       if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
+               printf("Retrying firmware update: %d\n", backend_update.status);
+               goto retry_fw_update;
+       }
+
+       if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
+               printf("Load status says loading failed: %d\n",
+                       backend_update.status);
+       } else {
+               printf("Backend Firmware (%s) Load done: status: %d\n",
+                               firmware_tag, backend_update.status);
+       }
+
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int fd, ret;
+
+       if (argc > 1 &&
+           (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
+               usage();
+               return -1;
+       }
+
+       if (argc > 1)
+               fwdev = argv[1];
+
+       if (argc > 2)
+               sscanf(argv[2], "%u", &fw_update_type);
+
+       if (argc > 3) {
+               firmware_tag = argv[3];
+       } else if (!fw_update_type) {
+               firmware_tag = FW_TAG_INT_DEFAULT;
+       } else {
+               firmware_tag = FW_TAG_BCND_DEFAULT;
+       }
+
+       if (argc > 4)
+               sscanf(argv[4], "%u", &fw_timeout);
+
+       printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
+               fwdev, fw_update_type == 0 ? "interface" : "backend",
+               firmware_tag, fw_timeout);
+
+       printf("Opening %s firmware management device\n", fwdev);
+
+       fd = open(fwdev, O_RDWR);
+       if (fd < 0) {
+               printf("Failed to open: %s\n", fwdev);
+               return -1;
+       }
+
+       /* Set Timeout */
+       printf("Setting timeout to %u ms\n", fw_timeout);
+
+       ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
+       if (ret < 0) {
+               printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
+               ret = -1;
+               goto close_fd;
+       }
+
+       if (!fw_update_type)
+               ret = update_intf_firmware(fd);
+       else
+               ret = update_backend_firmware(fd);
+
+close_fd:
+       close(fd);
+
+       return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/sysfs-bus-greybus b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
new file mode 100644 (file)
index 0000000..2e99896
--- /dev/null
@@ -0,0 +1,275 @@
+What:          /sys/bus/greybus/devices/greybusN
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The "root" greybus device for the Greybus device tree, or bus,
+               where N is a dynamically assigned 1-based id.
+
+What:          /sys/bus/greybus/devices/greybusN/bus_id
+Date:          April 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The ID of the "root" greybus device, or bus.
+
+What:          /sys/bus/greybus/devices/N-M
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               A Module M on the bus N, where M is the 1-byte interface
+               ID of the module's primary interface.
+
+What:          /sys/bus/greybus/devices/N-M/eject
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Writing a non-zero argument to this attribute disables the
+               module's interfaces before physically ejecting it.
+
+What:          /sys/bus/greybus/devices/N-M/module_id
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The ID of a Greybus module, corresponding to the ID of its
+               primary interface.
+
+What:          /sys/bus/greybus/devices/N-M/num_interfaces
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The number of interfaces of a module.
+
+What:          /sys/bus/greybus/devices/N-M.I
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               An Interface I on the bus N and module N-M, where I is the
+               1-byte interface ID.
+
+What:          /sys/bus/greybus/devices/N-M.I/current_now
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Current measurement of the interface in microamps (uA)
+
+What:          /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Unipro Device Descriptor Block Level 1 manufacturer ID for the
+               greybus Interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Unipro Device Descriptor Block Level 1 product ID for the
+               greybus Interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/interface_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The ID of a Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/interface_type
+Date:          June 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The type of a Greybus interface; "dummy", "unipro", "greybus",
+               or "unknown".
+
+What:          /sys/bus/greybus/devices/N-M.I/power_now
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Power measurement of the interface in microwatts (uW)
+
+What:          /sys/bus/greybus/devices/N-M.I/power_state
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               This file reflects the power state of a Greybus interface. If
+               the value read from it is "on", then power is currently
+               supplied to the interface. Otherwise it will read "off" and
+               power is currently not supplied to the interface.
+
+               If the value read is "off", then writing "on" (or '1', 'y',
+               'Y') to this file will enable power to the interface and an
+               attempt to boot and possibly enumerate it will be made. Note
+               that on errors, the interface will again be powered down.
+
+               If the value read is "on", then writing "off" (or '0', 'n',
+               'N') to this file will power down the interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/product_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Product ID of a Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/serial_number
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Serial Number of the Greybus interface, represented by a 64 bit
+               hexadecimal number.
+
+What:          /sys/bus/greybus/devices/N-M.I/vendor_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Vendor ID of a Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I/voltage_now
+Date:          March 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Voltage measurement of the interface in microvolts (uV)
+
+What:          /sys/bus/greybus/devices/N-M.I.ctrl
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Abstract control device for interface I that represents the
+               current mode of an enumerated Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I.ctrl/product_string
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Product ID string of a Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Vendor ID string of a Greybus interface.
+
+What:          /sys/bus/greybus/devices/N-M.I.B
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               A bundle B on the Interface I, where B is a 1-byte
+               number representing the bundle.
+
+What:          /sys/bus/greybus/devices/N-M.I.B/bundle_class
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The greybus class of the bundle B.
+
+What:          /sys/bus/greybus/devices/N-M.I.B/bundle_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The interface-unique id of the bundle B.
+
+What:          /sys/bus/greybus/devices/N-M.I.B/gpbX
+Date:          April 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The General Purpose Bridged PHY device of the bundle B,
+               where X is a dynamically assigned 0-based id.
+
+What:          /sys/bus/greybus/devices/N-M.I.B/state
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               A bundle has a state that is managed by the userspace
+               Endo process.  This file allows that Endo to signal
+               other Android HALs that the state of the bundle has
+               changed to a specific value.  When written to, any
+               process watching the file will be woken up, and the new
+               value can be read. It's a "poor-man's IPC", yes, but
+               simplifies the Android userspace code immensely.
+
+What:          /sys/bus/greybus/devices/N-svc
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The singleton SVC device of bus N.
+
+What:          /sys/bus/greybus/devices/N-svc/ap_intf_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The AP interface ID, a 1-byte non-zero integer which
+               defines the position of the AP module on the frame.
+               The interface positions are defined in the GMP
+               Module Developer Kit.
+
+What:          /sys/bus/greybus/devices/N-svc/endo_id
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The Endo ID, which is a 2-byte hexadecimal value
+               defined by the Endo layout scheme, documented in
+               the GMP Module Developer Kit.
+
+What:          /sys/bus/greybus/devices/N-svc/intf_eject
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Write the number of the interface that you wish to
+               forcibly eject from the system.
+
+What:          /sys/bus/greybus/devices/N-svc/version
+Date:          October 2015
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               The version number of the firmware in the SVC device.
+
+What:          /sys/bus/greybus/devices/N-svc/watchdog
+Date:          October 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               Whether the SVC watchdog is enabled or not.  Writing 0 to this
+               file will disable the watchdog; writing 1 will enable it.
+
+What:          /sys/bus/greybus/devices/N-svc/watchdog_action
+Date:          July 2016
+KernelVersion: 4.XX
+Contact:       Greg Kroah-Hartman <greg@kroah.com>
+Description:
+               This attribute indicates the action to be performed upon an SVC
+               watchdog bite.
+
+               The action can be either "reset" or "panic". Writing either
+               value will change the action taken on an SVC watchdog bite.
+               The default value is "reset".
+
+               "reset" means the UniPro subsystem is to be reset.
+
+               "panic" means an SVC watchdog bite will cause the kernel to panic.
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_class b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_class
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/bundle_id
new file mode 100644 (file)
index 0000000..d00491f
--- /dev/null
@@ -0,0 +1 @@
+1
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/state b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.1/state
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_class b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_class
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/bundle_id
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/state b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.2/state
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/product_string b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/product_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/vendor_string b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/1-2.2.ctrl/vendor_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_manufacturer_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_manufacturer_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_product_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/ddbl1_product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/interface_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/interface_id
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/product_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/serial_number b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/serial_number
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/vendor_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/1-2.2/vendor_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/eject b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/eject
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/module_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/module_id
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/num_interfaces b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-2/num_interfaces
new file mode 100644 (file)
index 0000000..d00491f
--- /dev/null
@@ -0,0 +1 @@
+1
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_class b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_class
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/bundle_id
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/gpio/gpiochip490/.gitignore b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/gpio/gpiochip490/.gitignore
new file mode 100644 (file)
index 0000000..f935021
--- /dev/null
@@ -0,0 +1 @@
+!.gitignore
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/i2c-4/.gitignore b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/gbphy1/i2c-4/.gitignore
new file mode 100644 (file)
index 0000000..f935021
--- /dev/null
@@ -0,0 +1 @@
+!.gitignore
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/state b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.2/state
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/product_string b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/product_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/vendor_string b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/1-5.5.ctrl/vendor_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_manufacturer_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_manufacturer_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_product_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/ddbl1_product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/interface_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/interface_id
new file mode 100644 (file)
index 0000000..7ed6ff8
--- /dev/null
@@ -0,0 +1 @@
+5
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/product_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/serial_number b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/serial_number
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/vendor_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.5/vendor_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.6/interface_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/1-5.6/interface_id
new file mode 100644 (file)
index 0000000..1e8b314
--- /dev/null
@@ -0,0 +1 @@
+6
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/eject b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/eject
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/module_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/module_id
new file mode 100644 (file)
index 0000000..7ed6ff8
--- /dev/null
@@ -0,0 +1 @@
+5
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/num_interfaces b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-5/num_interfaces
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/ap_intf_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/ap_intf_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/endo_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/endo_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/intf_eject b/drivers/staging/greybus/Documentation/sysfs/greybus1/1-svc/intf_eject
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus1/bus_id b/drivers/staging/greybus/Documentation/sysfs/greybus1/bus_id
new file mode 100644 (file)
index 0000000..d00491f
--- /dev/null
@@ -0,0 +1 @@
+1
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_class b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_class
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/bundle_id
new file mode 100644 (file)
index 0000000..d00491f
--- /dev/null
@@ -0,0 +1 @@
+1
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/state b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.1/state
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/product_string b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/product_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/vendor_string b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/2-3.3.ctrl/vendor_string
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_manufacturer_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_manufacturer_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_product_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/ddbl1_product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/interface_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/interface_id
new file mode 100644 (file)
index 0000000..00750ed
--- /dev/null
@@ -0,0 +1 @@
+3
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/product_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/product_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/serial_number b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/serial_number
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/vendor_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/2-3.3/vendor_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/eject b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/eject
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/module_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/module_id
new file mode 100644 (file)
index 0000000..00750ed
--- /dev/null
@@ -0,0 +1 @@
+3
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/num_interfaces b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-3/num_interfaces
new file mode 100644 (file)
index 0000000..d00491f
--- /dev/null
@@ -0,0 +1 @@
+1
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/ap_intf_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/ap_intf_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/endo_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/endo_id
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/intf_eject b/drivers/staging/greybus/Documentation/sysfs/greybus2/2-svc/intf_eject
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/drivers/staging/greybus/Documentation/sysfs/greybus2/bus_id b/drivers/staging/greybus/Documentation/sysfs/greybus2/bus_id
new file mode 100644 (file)
index 0000000..0cfbf08
--- /dev/null
@@ -0,0 +1 @@
+2
diff --git a/drivers/staging/greybus/LICENSE b/drivers/staging/greybus/LICENSE
new file mode 100644 (file)
index 0000000..d7f1051
--- /dev/null
@@ -0,0 +1,339 @@
+GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    {description}
+    Copyright (C) {year}  {fullname}
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  {signature of Ty Coon}, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
new file mode 100644 (file)
index 0000000..fa5aaf3
--- /dev/null
@@ -0,0 +1,147 @@
+greybus-y :=   core.o          \
+               debugfs.o       \
+               hd.o            \
+               manifest.o      \
+               module.o        \
+               interface.o     \
+               bundle.o        \
+               connection.o    \
+               control.o       \
+               svc.o           \
+               svc_watchdog.o  \
+               operation.o     \
+               timesync.o      \
+               timesync_platform.o
+
+gb-gbphy-y := gbphy.o
+
+# Prefix all modules with gb-
+gb-vibrator-y := vibrator.o
+gb-power-supply-y := power_supply.o
+gb-log-y := log.o
+gb-loopback-y := loopback.o
+gb-light-y := light.o
+gb-raw-y := raw.o
+gb-hid-y := hid.o
+gb-es2-y := es2.o
+gb-arche-y := arche-platform.o arche-apb-ctrl.o
+gb-audio-module-y := audio_module.o audio_topology.o
+gb-audio-codec-y := audio_codec.o
+gb-audio-gb-y := audio_gb.o
+gb-audio-apbridgea-y := audio_apbridgea.o
+gb-audio-manager-y += audio_manager.o
+gb-audio-manager-y += audio_manager_module.o
+gb-bootrom-y := bootrom.o
+gb-camera-y := camera.o
+gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
+gb-spilib-y := spilib.o
+gb-sdio-y := sdio.o
+gb-uart-y := uart.o
+gb-pwm-y := pwm.o
+gb-gpio-y := gpio.o
+gb-i2c-y := i2c.o
+gb-usb-y := usb.o
+gb-spi-y := spi.o
+
+obj-m += greybus.o
+obj-m += gb-gbphy.o
+obj-m += gb-vibrator.o
+obj-m += gb-power-supply.o
+obj-m += gb-log.o
+obj-m += gb-loopback.o
+obj-m += gb-light.o
+obj-m += gb-hid.o
+obj-m += gb-raw.o
+obj-m += gb-es2.o
+ifeq ($(CONFIG_USB_HSIC_USB3613),y)
+ obj-m += gb-arche.o
+endif
+ifeq ($(CONFIG_ARCH_MSM8994),y)
+ obj-m += gb-audio-codec.o
+ obj-m += gb-audio-module.o
+ obj-m += gb-camera.o
+endif
+obj-m += gb-audio-gb.o
+obj-m += gb-audio-apbridgea.o
+obj-m += gb-audio-manager.o
+obj-m += gb-bootrom.o
+obj-m += gb-firmware.o
+obj-m += gb-spilib.o
+obj-m += gb-sdio.o
+obj-m += gb-uart.o
+obj-m += gb-pwm.o
+obj-m += gb-gpio.o
+obj-m += gb-i2c.o
+obj-m += gb-usb.o
+obj-m += gb-spi.o
+
+KERNELVER              ?= $(shell uname -r)
+KERNELDIR              ?= /lib/modules/$(KERNELVER)/build
+INSTALL_MOD_PATH       ?= /..
+PWD                    := $(shell pwd)
+
+# kernel config options that shall be enabled
+CONFIG_OPTIONS_ENABLE := POWER_SUPPLY PWM SYSFS SPI USB SND_SOC MMC LEDS_CLASS INPUT
+
+# kernel config options that shall be disabled
+CONFIG_OPTIONS_DISABLE :=
+
+# this only runs in the kbuild part of the makefile
+ifneq ($(KERNELRELEASE),)
+# This function returns the argument version if the current kernel version is
+# lower than the passed version, 1 if they are equal, or the current kernel
+# version if it is greater than the argument version.
+kvers_cmp=$(shell [ "$(KERNELVERSION)" = "$(1)" ] && echo 1 || printf "$(1)\n$(KERNELVERSION)" | sort -V | tail -1)
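+# Example (illustrative, assuming KERNELVERSION=4.4.0): the call below,
+# $(call kvers_cmp,"3.19.0"), expands to 4.4.0 (the kernel being built
+# against is newer), so LEDS_CLASS_FLASH is required; on a 3.18 kernel it
+# would expand to 3.19.0 and the requirement would be skipped.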
+
+ifneq ($(call kvers_cmp,"3.19.0"),3.19.0)
+    CONFIG_OPTIONS_ENABLE += LEDS_CLASS_FLASH
+endif
+
+ifneq ($(call kvers_cmp,"4.2.0"),4.2.0)
+    CONFIG_OPTIONS_ENABLE += V4L2_FLASH_LED_CLASS
+endif
+
+$(foreach opt,$(CONFIG_OPTIONS_ENABLE),$(if $(CONFIG_$(opt)),, \
+     $(error CONFIG_$(opt) is disabled in the kernel configuration and must be enabled \
+     to continue compilation)))
+$(foreach opt,$(CONFIG_OPTIONS_DISABLE),$(if $(filter m y, $(CONFIG_$(opt))), \
+     $(error CONFIG_$(opt) is enabled in the kernel configuration and must be disabled \
+     to continue compilation),))
+endif
+
+# add -Wall to try to catch everything we can.
+ccflags-y := -Wall
+
+# needed for trace events
+ccflags-y += -I$(src)
+
+GB_AUDIO_MANAGER_SYSFS ?= true
+ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
+gb-audio-manager-y += audio_manager_sysfs.o
+ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
+endif
+
+all: module
+
+tools::
+       $(MAKE) -C tools KERNELDIR=$(realpath $(KERNELDIR))
+
+module:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD)
+
+check:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD) C=2 CF="-D__CHECK_ENDIAN__"
+
+clean:
+       rm -f *.o *~ core .depend .*.cmd *.ko *.mod.c
+       rm -f Module.markers Module.symvers modules.order
+       rm -rf .tmp_versions Modules.symvers
+       $(MAKE) -C tools clean
+
+coccicheck:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD) coccicheck
+
+install: module
+       mkdir -p $(INSTALL_MOD_PATH)/lib/modules/$(KERNELVER)/kernel/drivers/greybus/
+       cp -f *.ko $(INSTALL_MOD_PATH)/lib/modules/$(KERNELVER)/kernel/drivers/greybus/
+       depmod -b $(INSTALL_MOD_PATH) -a $(KERNELVER)
diff --git a/drivers/staging/greybus/README b/drivers/staging/greybus/README
new file mode 100644 (file)
index 0000000..b0745d3
--- /dev/null
@@ -0,0 +1,10 @@
+Greybus kernel code
+
+To build against the running kernel (odds are you don't want this):
+       make
+
+To build against a specific kernel source tree (odds are you want this):
+       KERNELDIR=/home/some/random/place make
+
+For any questions / concerns about this code base, please email:
+       Greg Kroah-Hartman <greg@kroah.com>
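+
+To build and install the modules against the running kernel (see the
+"install" target in the Makefile for the exact destination):
+       make install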
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
new file mode 100644 (file)
index 0000000..59d9d42
--- /dev/null
@@ -0,0 +1,522 @@
+/*
+ * Arche Platform driver to control APB.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include "arche_platform.h"
+
+
+struct arche_apb_ctrl_drvdata {
+       /* Control GPIO signals to and from AP <=> AP Bridges */
+       int resetn_gpio;
+       int boot_ret_gpio;
+       int pwroff_gpio;
+       int wake_in_gpio;
+       int wake_out_gpio;
+       int pwrdn_gpio;
+
+       enum arche_platform_state state;
+       bool init_disabled;
+
+       struct regulator *vcore;
+       struct regulator *vio;
+
+       int clk_en_gpio;
+       struct clk *clk;
+
+       struct pinctrl *pinctrl;
+       struct pinctrl_state *pin_default;
+
+       /* V2: SPI Bus control  */
+       int spi_en_gpio;
+       bool spi_en_polarity_high;
+};
+
+/*
+ * Note that these low-level APIs are active high
+ */
+static inline void deassert_reset(unsigned int gpio)
+{
+       gpio_set_value(gpio, 1);
+}
+
+static inline void assert_reset(unsigned int gpio)
+{
+       gpio_set_value(gpio, 0);
+}
+
+/*
+ * Note: Please do not modify the sequence below, as it is defined by the spec
+ */
+static int coldboot_seq(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+       int ret;
+
+       if (apb->init_disabled ||
+                       apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+               return 0;
+
+       /* Hold APB in reset state */
+       assert_reset(apb->resetn_gpio);
+
+       if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+                       gpio_is_valid(apb->spi_en_gpio))
+               devm_gpio_free(dev, apb->spi_en_gpio);
+
+       /* Enable power to APB */
+       if (!IS_ERR(apb->vcore)) {
+               ret = regulator_enable(apb->vcore);
+               if (ret) {
+                       dev_err(dev, "failed to enable core regulator\n");
+                       return ret;
+               }
+       }
+
+       if (!IS_ERR(apb->vio)) {
+               ret = regulator_enable(apb->vio);
+               if (ret) {
+                       dev_err(dev, "failed to enable IO regulator\n");
+                       return ret;
+               }
+       }
+
+       apb_bootret_deassert(dev);
+
+       /* On DB3 the clock was not mandatory */
+       if (gpio_is_valid(apb->clk_en_gpio))
+               gpio_set_value(apb->clk_en_gpio, 1);
+
+       usleep_range(100, 200);
+
+       /* deassert reset to APB : Active-low signal */
+       deassert_reset(apb->resetn_gpio);
+
+       apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
+
+       return 0;
+}
+
+static int fw_flashing_seq(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+       int ret;
+
+       if (apb->init_disabled ||
+                       apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+               return 0;
+
+       ret = regulator_enable(apb->vcore);
+       if (ret) {
+               dev_err(dev, "failed to enable core regulator\n");
+               return ret;
+       }
+
+       ret = regulator_enable(apb->vio);
+       if (ret) {
+               dev_err(dev, "failed to enable IO regulator\n");
+               return ret;
+       }
+
+       if (gpio_is_valid(apb->spi_en_gpio)) {
+               unsigned long flags;
+
+               if (apb->spi_en_polarity_high)
+                       flags = GPIOF_OUT_INIT_HIGH;
+               else
+                       flags = GPIOF_OUT_INIT_LOW;
+
+               ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
+                               flags, "apb_spi_en");
+               if (ret) {
+                       dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
+                               apb->spi_en_gpio);
+                       return ret;
+               }
+       }
+
+       /* for flashing, the device should be in reset state */
+       assert_reset(apb->resetn_gpio);
+       apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
+
+       return 0;
+}
+
+static int standby_boot_seq(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+       if (apb->init_disabled)
+               return 0;
+
+       /* Even if it is in OFF state, we do not want to change the state */
+       if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
+                       apb->state == ARCHE_PLATFORM_STATE_OFF)
+               return 0;
+
+       if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+                       gpio_is_valid(apb->spi_en_gpio))
+               devm_gpio_free(dev, apb->spi_en_gpio);
+
+       /*
+        * As per WDM spec, do nothing
+        *
+        * Pasted from WDM spec,
+        *  - A falling edge on POWEROFF_L is detected (a)
+        *  - WDM enters standby mode, but no output signals are changed
+        */
+
+       /* TODO: POWEROFF_L is input to WDM module  */
+       apb->state = ARCHE_PLATFORM_STATE_STANDBY;
+       return 0;
+}
+
+static void poweroff_seq(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+       if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
+               return;
+
+       if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+                       gpio_is_valid(apb->spi_en_gpio))
+               devm_gpio_free(dev, apb->spi_en_gpio);
+
+       /* disable the clock */
+       if (gpio_is_valid(apb->clk_en_gpio))
+               gpio_set_value(apb->clk_en_gpio, 0);
+
+       if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
+               regulator_disable(apb->vcore);
+
+       if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
+               regulator_disable(apb->vio);
+
+       /* As part of exit, put APB back in reset state */
+       assert_reset(apb->resetn_gpio);
+       apb->state = ARCHE_PLATFORM_STATE_OFF;
+
+       /* TODO: May have to send an event to SVC about this exit */
+}
+
+void apb_bootret_assert(struct device *dev)
+{
+       struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+       gpio_set_value(apb->boot_ret_gpio, 1);
+}
+
+void apb_bootret_deassert(struct device *dev)
+{
+       struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+       gpio_set_value(apb->boot_ret_gpio, 0);
+}
+
+int apb_ctrl_coldboot(struct device *dev)
+{
+       return coldboot_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_fw_flashing(struct device *dev)
+{
+       return fw_flashing_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_standby_boot(struct device *dev)
+{
+       return standby_boot_seq(to_platform_device(dev));
+}
+
+void apb_ctrl_poweroff(struct device *dev)
+{
+       poweroff_seq(to_platform_device(dev));
+}
+
+static ssize_t state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+       int ret = 0;
+       bool is_disabled;
+
+       if (sysfs_streq(buf, "off")) {
+               if (apb->state == ARCHE_PLATFORM_STATE_OFF)
+                       return count;
+
+               poweroff_seq(pdev);
+       } else if (sysfs_streq(buf, "active")) {
+               if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+                       return count;
+
+               poweroff_seq(pdev);
+               is_disabled = apb->init_disabled;
+               apb->init_disabled = false;
+               ret = coldboot_seq(pdev);
+               if (ret)
+                       apb->init_disabled = is_disabled;
+       } else if (sysfs_streq(buf, "standby")) {
+               if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
+                       return count;
+
+               ret = standby_boot_seq(pdev);
+       } else if (sysfs_streq(buf, "fw_flashing")) {
+               if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+                       return count;
+
+               /*
+                * First we want to make sure we power off everything
+                * and then enter FW flashing state.
+                */
+               poweroff_seq(pdev);
+               ret = fw_flashing_seq(pdev);
+       } else {
+               dev_err(dev, "unknown state\n");
+               ret = -EINVAL;
+       }
+
+       return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+       switch (apb->state) {
+       case ARCHE_PLATFORM_STATE_OFF:
+               return sprintf(buf, "off%s\n",
+                               apb->init_disabled ? ",disabled" : "");
+       case ARCHE_PLATFORM_STATE_ACTIVE:
+               return sprintf(buf, "active\n");
+       case ARCHE_PLATFORM_STATE_STANDBY:
+               return sprintf(buf, "standby\n");
+       case ARCHE_PLATFORM_STATE_FW_FLASHING:
+               return sprintf(buf, "fw_flashing\n");
+       default:
+               return sprintf(buf, "unknown state\n");
+       }
+}
+
+static DEVICE_ATTR_RW(state);
+
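+/*
+ * The "state" attribute created in arche_apb_ctrl_probe() accepts the
+ * strings "off", "active", "standby" and "fw_flashing" (handled by
+ * state_store() above); reading it reports the current state via
+ * state_show(), with ",disabled" appended to "off" while init is disabled.
+ */
+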
+static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
+               struct arche_apb_ctrl_drvdata *apb)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       int ret;
+
+       apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+       if (apb->resetn_gpio < 0) {
+               dev_err(dev, "failed to get reset gpio\n");
+               return apb->resetn_gpio;
+       }
+       ret = devm_gpio_request_one(dev, apb->resetn_gpio,
+                       GPIOF_OUT_INIT_LOW, "apb-reset");
+       if (ret) {
+               dev_err(dev, "Failed requesting reset gpio %d\n",
+                               apb->resetn_gpio);
+               return ret;
+       }
+
+       apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
+       if (apb->boot_ret_gpio < 0) {
+               dev_err(dev, "failed to get boot retention gpio\n");
+               return apb->boot_ret_gpio;
+       }
+       ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
+                       GPIOF_OUT_INIT_LOW, "boot retention");
+       if (ret) {
+               dev_err(dev, "Failed requesting bootret gpio %d\n",
+                               apb->boot_ret_gpio);
+               return ret;
+       }
+
+       /* It's not mandatory to support power management interface */
+       apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
+       if (apb->pwroff_gpio < 0) {
+               dev_err(dev, "failed to get power off gpio\n");
+               return apb->pwroff_gpio;
+       }
+       ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
+                       GPIOF_IN, "pwroff_n");
+       if (ret) {
+               dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
+                               apb->pwroff_gpio);
+               return ret;
+       }
+
+       /* Do not make clock mandatory as of now (for DB3) */
+       apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
+       if (apb->clk_en_gpio < 0) {
+               dev_warn(dev, "failed to get clock en gpio\n");
+       } else if (gpio_is_valid(apb->clk_en_gpio)) {
+               ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
+                               GPIOF_OUT_INIT_LOW, "apb_clk_en");
+               if (ret) {
+                       dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
+                                       apb->clk_en_gpio);
+                       return ret;
+               }
+       }
+
+       apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
+       if (apb->pwrdn_gpio < 0)
+               dev_warn(dev, "failed to get power down gpio\n");
+
+       /* Regulators are optional, as we may have fixed supply coming in */
+       apb->vcore = devm_regulator_get(dev, "vcore");
+       if (IS_ERR(apb->vcore))
+               dev_warn(dev, "no core regulator found\n");
+
+       apb->vio = devm_regulator_get(dev, "vio");
+       if (IS_ERR(apb->vio))
+               dev_warn(dev, "no IO regulator found\n");
+
+       apb->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (IS_ERR(apb->pinctrl)) {
+               dev_err(&pdev->dev, "could not get pinctrl handle\n");
+               return PTR_ERR(apb->pinctrl);
+       }
+       apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
+       if (IS_ERR(apb->pin_default)) {
+               dev_err(&pdev->dev, "could not get default pin state\n");
+               return PTR_ERR(apb->pin_default);
+       }
+
+       /* Only applicable for platform >= V2 */
+       apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
+       if (apb->spi_en_gpio >= 0) {
+               if (of_property_read_bool(pdev->dev.of_node,
+                                       "spi-en-active-high"))
+                       apb->spi_en_polarity_high = true;
+       }
+
+       return 0;
+}
+
+static int arche_apb_ctrl_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct arche_apb_ctrl_drvdata *apb;
+       struct device *dev = &pdev->dev;
+
+       apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
+       if (!apb)
+               return -ENOMEM;
+
+       ret = apb_ctrl_get_devtree_data(pdev, apb);
+       if (ret) {
+               dev_err(dev, "failed to get apb devicetree data %d\n", ret);
+               return ret;
+       }
+
+       /* Initially set APB to OFF state */
+       apb->state = ARCHE_PLATFORM_STATE_OFF;
+       /* Check whether device needs to be enabled on boot */
+       if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
+               apb->init_disabled = true;
+
+       platform_set_drvdata(pdev, apb);
+
+       /* Create sysfs interface to allow user to change state dynamically */
+       ret = device_create_file(dev, &dev_attr_state);
+       if (ret) {
+               dev_err(dev, "failed to create state file in sysfs\n");
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "Device registered successfully\n");
+       return 0;
+}
+
+static int arche_apb_ctrl_remove(struct platform_device *pdev)
+{
+       device_remove_file(&pdev->dev, &dev_attr_state);
+       poweroff_seq(pdev);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static int arche_apb_ctrl_suspend(struct device *dev)
+{
+       /*
+        * If timing profile permits, we may shutdown bridge
+        * completely
+        *
+        * TODO: sequence ??
+        *
+        * Also, we need to make sure we meet the precondition for UniPro suspend
+        * Precondition: Definition ???
+        */
+       return 0;
+}
+
+static int arche_apb_ctrl_resume(struct device *dev)
+{
+       /*
+        * At least for ES2 we have to meet the delay requirement between
+        * UniPro switch and AP bridge init, depending on whether bridge is in
+        * OFF state or standby state.
+        *
+        * Based on whether bridge is in standby or OFF state we may have to
+        * assert multiple signals. Please refer to WDM spec, for more info.
+        *
+        */
+       return 0;
+}
+
+static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
+{
+       apb_ctrl_poweroff(&pdev->dev);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
+                        arche_apb_ctrl_resume);
+
+static struct of_device_id arche_apb_ctrl_of_match[] = {
+       { .compatible = "usbffff,2", },
+       { },
+};
+
+static struct platform_driver arche_apb_ctrl_device_driver = {
+       .probe          = arche_apb_ctrl_probe,
+       .remove         = arche_apb_ctrl_remove,
+       .shutdown       = arche_apb_ctrl_shutdown,
+       .driver         = {
+               .name   = "arche-apb-ctrl",
+               .pm     = &arche_apb_ctrl_pm_ops,
+               .of_match_table = arche_apb_ctrl_of_match,
+       }
+};
+
+int __init arche_apb_init(void)
+{
+       return platform_driver_register(&arche_apb_ctrl_device_driver);
+}
+
+void __exit arche_apb_exit(void)
+{
+       platform_driver_unregister(&arche_apb_ctrl_device_driver);
+}
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
new file mode 100644 (file)
index 0000000..9d9048e
--- /dev/null
@@ -0,0 +1,828 @@
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include "arche_platform.h"
+#include "greybus.h"
+
+#include <linux/usb/usb3613.h>
+
+#define WD_COLDBOOT_PULSE_WIDTH_MS     30
+
+enum svc_wakedetect_state {
+       WD_STATE_IDLE,                  /* Default state = pulled high/low */
+       WD_STATE_BOOT_INIT,             /* WD = falling edge (low) */
+       WD_STATE_COLDBOOT_TRIG,         /* WD = rising edge (high), > 30msec */
+       WD_STATE_STANDBYBOOT_TRIG,      /* As of now not used ?? */
+       WD_STATE_COLDBOOT_START,        /* Cold boot process started */
+       WD_STATE_STANDBYBOOT_START,     /* Not used */
+       WD_STATE_TIMESYNC,
+};
+
+struct arche_platform_drvdata {
+       /* Control GPIO signals to and from AP <=> SVC */
+       int svc_reset_gpio;
+       bool is_reset_act_hi;
+       int svc_sysboot_gpio;
+       int wake_detect_gpio; /* bi-dir, maps to WAKE_MOD & WAKE_FRAME signals */
+
+       enum arche_platform_state state;
+
+       int svc_refclk_req;
+       struct clk *svc_ref_clk;
+
+       struct pinctrl *pinctrl;
+       struct pinctrl_state *pin_default;
+
+       int num_apbs;
+
+       enum svc_wakedetect_state wake_detect_state;
+       int wake_detect_irq;
+       spinlock_t wake_lock;                   /* Protect wake_detect_state */
+       struct mutex platform_state_mutex;      /* Protect state */
+       wait_queue_head_t wq;                   /* WQ for arche_pdata->state */
+       unsigned long wake_detect_start;
+       struct notifier_block pm_notifier;
+
+       struct device *dev;
+       struct gb_timesync_svc *timesync_svc_pdata;
+};
+
+static int arche_apb_bootret_assert(struct device *dev, void *data)
+{
+       apb_bootret_assert(dev);
+       return 0;
+}
+
+static int arche_apb_bootret_deassert(struct device *dev, void *data)
+{
+       apb_bootret_deassert(dev);
+       return 0;
+}
+
+/* Requires calling context to hold arche_pdata->platform_state_mutex */
+static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
+                                    enum arche_platform_state state)
+{
+       arche_pdata->state = state;
+}
+
+/*
+ * arche_platform_change_state: Change the operational state
+ *
+ * This exported function allows external drivers to change the state
+ * of the arche-platform driver.
+ * Note that this function only supports transitions between two states
+ * with limited functionality.
+ *
+ *  - ARCHE_PLATFORM_STATE_TIME_SYNC:
+ *    Once set, allows timesync operations between SVC <=> AP and makes
+ *    sure that the arche-platform driver ignores any subsequent
+ *    events/pulses from the SVC over wake/detect.
+ *
+ *  - ARCHE_PLATFORM_STATE_ACTIVE:
+ *    Puts the driver back into the active state, where any pulse from the SVC
+ *    on the wake/detect line would trigger either a cold or standby boot.
+ *    Note: A transition request from this function does not trigger a
+ *          cold/standby boot. It just puts the driver's bookkeeping variable
+ *          back to the ACTIVE state and restores the interrupt.
+ *
+ * Returns -ENODEV if device not found, -EAGAIN if the driver cannot currently
+ * satisfy the requested state-transition or -EINVAL for all other
+ * state-transition requests.
+ */
+int arche_platform_change_state(enum arche_platform_state state,
+                               struct gb_timesync_svc *timesync_svc_pdata)
+{
+       struct arche_platform_drvdata *arche_pdata;
+       struct platform_device *pdev;
+       struct device_node *np;
+       int ret = -EAGAIN;
+       unsigned long flags;
+
+       np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
+       if (!np) {
+               pr_err("google,arche-platform device node not found\n");
+               return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(np);
+       if (!pdev) {
+               pr_err("arche-platform device not found\n");
+               return -ENODEV;
+       }
+
+       arche_pdata = platform_get_drvdata(pdev);
+
+       mutex_lock(&arche_pdata->platform_state_mutex);
+       spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+
+       if (arche_pdata->state == state) {
+               ret = 0;
+               goto exit;
+       }
+
+       switch (state) {
+       case ARCHE_PLATFORM_STATE_TIME_SYNC:
+               if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
+                       dev_err(arche_pdata->dev,
+                               "driver busy with wake/detect line ops\n");
+                       goto  exit;
+               }
+               device_for_each_child(arche_pdata->dev, NULL,
+                                     arche_apb_bootret_assert);
+               arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
+               break;
+       case ARCHE_PLATFORM_STATE_ACTIVE:
+               if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               device_for_each_child(arche_pdata->dev, NULL,
+                                     arche_apb_bootret_deassert);
+               arche_pdata->wake_detect_state = WD_STATE_IDLE;
+               break;
+       case ARCHE_PLATFORM_STATE_OFF:
+       case ARCHE_PLATFORM_STATE_STANDBY:
+       case ARCHE_PLATFORM_STATE_FW_FLASHING:
+               dev_err(arche_pdata->dev, "busy, request to retry later\n");
+               goto exit;
+       default:
+               ret = -EINVAL;
+               dev_err(arche_pdata->dev,
+                       "invalid state transition request\n");
+               goto exit;
+       }
+       arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
+       arche_platform_set_state(arche_pdata, state);
+       if (state == ARCHE_PLATFORM_STATE_ACTIVE)
+               wake_up(&arche_pdata->wq);
+
+       ret = 0;
+exit:
+       spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+       mutex_unlock(&arche_pdata->platform_state_mutex);
+       of_node_put(np);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(arche_platform_change_state);
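For reference, a minimal sketch of how an external driver (the timesync code is the intended caller) might use this exported helper; the wrapper function name is hypothetical and error handling is abbreviated:

#include "arche_platform.h"

/* Hypothetical caller: enter TIME_SYNC, do the work, then restore ACTIVE. */
static int example_run_timesync(struct gb_timesync_svc *tsvc)
{
	int ret;

	ret = arche_platform_change_state(ARCHE_PLATFORM_STATE_TIME_SYNC, tsvc);
	if (ret)	/* -ENODEV, -EAGAIN or -EINVAL as documented above */
		return ret;

	/* ... perform SVC <=> AP timesync operations here ... */

	return arche_platform_change_state(ARCHE_PLATFORM_STATE_ACTIVE, tsvc);
}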
+
+/* Requires arche_pdata->wake_lock is held by calling context */
+static void arche_platform_set_wake_detect_state(
+                               struct arche_platform_drvdata *arche_pdata,
+                               enum svc_wakedetect_state state)
+{
+       arche_pdata->wake_detect_state = state;
+}
+
+static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
+{
+       gpio_set_value(gpio, onoff);
+}
+
+static int apb_cold_boot(struct device *dev, void *data)
+{
+       int ret;
+
+       ret = apb_ctrl_coldboot(dev);
+       if (ret)
+               dev_warn(dev, "failed to coldboot\n");
+
+       /* Child nodes are independent, so do not abort coldboot on failure */
+       return 0;
+}
+
+static int apb_poweroff(struct device *dev, void *data)
+{
+       apb_ctrl_poweroff(dev);
+
+       /* Disable HUB3613 hub mode. */
+       if (usb3613_hub_mode_ctrl(false))
+               dev_warn(dev, "failed to control hub device\n");
+
+       return 0;
+}
+
+static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
+{
+       /* Enable interrupt here, to read event back from SVC */
+       gpio_direction_input(arche_pdata->wake_detect_gpio);
+       enable_irq(arche_pdata->wake_detect_irq);
+}
+
+static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
+{
+       struct arche_platform_drvdata *arche_pdata = devid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+       if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
+               /* Something is wrong */
+               spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+               return IRQ_HANDLED;
+       }
+
+       arche_platform_set_wake_detect_state(arche_pdata,
+                                            WD_STATE_COLDBOOT_START);
+       spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+       /* A complete power cycle is expected, so first make sure it is powered off */
+       device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+       /* Bring APB out of reset: cold boot sequence */
+       device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);
+
+       /* Enable HUB3613 into HUB mode. */
+       if (usb3613_hub_mode_ctrl(true))
+               dev_warn(arche_pdata->dev, "failed to control hub device\n");
+
+       spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+       arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+       spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
+{
+       struct arche_platform_drvdata *arche_pdata = devid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+
+       if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
+               gb_timesync_irq(arche_pdata->timesync_svc_pdata);
+               goto exit;
+       }
+
+       if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
+               /* wake/detect rising */
+
+               /*
+                * If the wake/detect line goes high again within 30 msec of
+                * going low, a standby boot sequence is being requested;
+                * that is not supported/implemented as of now, so ignore it.
+                */
+               if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
+                       if (time_before(jiffies,
+                                       arche_pdata->wake_detect_start +
+                                       msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
+                               arche_platform_set_wake_detect_state(arche_pdata,
+                                                                    WD_STATE_IDLE);
+                       } else {
+                               /* Check we are not already in the irq thread */
+                               if (arche_pdata->wake_detect_state !=
+                                               WD_STATE_COLDBOOT_START) {
+                                       arche_platform_set_wake_detect_state(arche_pdata,
+                                                                            WD_STATE_COLDBOOT_TRIG);
+                                       spin_unlock_irqrestore(
+                                               &arche_pdata->wake_lock,
+                                               flags);
+                                       return IRQ_WAKE_THREAD;
+                               }
+                       }
+               }
+       } else {
+               /* wake/detect falling */
+               if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
+                       arche_pdata->wake_detect_start = jiffies;
+                       /*
+                        * When wake/detect first goes low, we assume it is
+                        * meant for coldboot and set the flag. If the line
+                        * then stays low beyond 30 msec it is a coldboot,
+                        * otherwise we fall back to standby boot.
+                        */
+                       arche_platform_set_wake_detect_state(arche_pdata,
+                                                            WD_STATE_BOOT_INIT);
+               }
+       }
+
+exit:
+       spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+       return IRQ_HANDLED;
+}
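The pulse-width rule used above can be stated in isolation: a wake/detect low pulse shorter than WD_COLDBOOT_PULSE_WIDTH_MS (30 ms) is treated as a standby-boot request (currently unsupported and ignored), while a longer one escalates to the cold-boot thread. A condensed sketch of just that classification, with the locking and state bookkeeping stripped out; the helper name is made up for illustration:

#include <linux/jiffies.h>

/* True when the low pulse lasted at least 30 ms (WD_COLDBOOT_PULSE_WIDTH_MS). */
static bool example_pulse_requests_coldboot(unsigned long fall_jiffies)
{
	return !time_before(jiffies, fall_jiffies + msecs_to_jiffies(30));
}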
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
+{
+       int ret;
+
+       if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+               return 0;
+
+       dev_info(arche_pdata->dev, "Booting from cold boot state\n");
+
+       svc_reset_onoff(arche_pdata->svc_reset_gpio,
+                       arche_pdata->is_reset_act_hi);
+
+       gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
+       usleep_range(100, 200);
+
+       ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+       if (ret) {
+               dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+                               ret);
+               return ret;
+       }
+
+       /* bring SVC out of reset */
+       svc_reset_onoff(arche_pdata->svc_reset_gpio,
+                       !arche_pdata->is_reset_act_hi);
+
+       arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);
+
+       return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
+{
+       int ret;
+
+       if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+               return 0;
+
+       dev_info(arche_pdata->dev, "Switching to FW flashing state\n");
+
+       svc_reset_onoff(arche_pdata->svc_reset_gpio,
+                       arche_pdata->is_reset_act_hi);
+
+       gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);
+
+       usleep_range(100, 200);
+
+       ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+       if (ret) {
+               dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+                               ret);
+               return ret;
+       }
+
+       svc_reset_onoff(arche_pdata->svc_reset_gpio,
+                       !arche_pdata->is_reset_act_hi);
+
+       arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);
+
+       return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static void arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
+{
+       unsigned long flags;
+
+       if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+               return;
+
+       /* If in fw_flashing mode, there is no need to repeat things */
+       if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
+               disable_irq(arche_pdata->wake_detect_irq);
+
+               spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+               arche_platform_set_wake_detect_state(arche_pdata,
+                                                    WD_STATE_IDLE);
+               spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+       }
+
+       clk_disable_unprepare(arche_pdata->svc_ref_clk);
+
+       /* As part of exit, put APB back in reset state */
+       svc_reset_onoff(arche_pdata->svc_reset_gpio,
+                       arche_pdata->is_reset_act_hi);
+
+       arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+}
+
+static ssize_t state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+       int ret = 0;
+
+retry:
+       mutex_lock(&arche_pdata->platform_state_mutex);
+       if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
+               mutex_unlock(&arche_pdata->platform_state_mutex);
+               ret = wait_event_interruptible(
+                       arche_pdata->wq,
+                       arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
+               if (ret)
+                       return ret;
+               goto retry;
+       }
+
+       if (sysfs_streq(buf, "off")) {
+               if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+                       goto exit;
+
+               /* If the SVC goes down, bring down the APBs as well */
+               device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+               arche_platform_poweroff_seq(arche_pdata);
+
+       } else if (sysfs_streq(buf, "active")) {
+               if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+                       goto exit;
+
+               /*
+                * First we want to make sure we power off everything and
+                * then activate back again.
+                */
+               device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+               arche_platform_poweroff_seq(arche_pdata);
+
+               arche_platform_wd_irq_en(arche_pdata);
+               ret = arche_platform_coldboot_seq(arche_pdata);
+               if (ret)
+                       goto exit;
+
+       } else if (sysfs_streq(buf, "standby")) {
+               if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
+                       goto exit;
+
+               dev_warn(arche_pdata->dev, "standby state not supported\n");
+       } else if (sysfs_streq(buf, "fw_flashing")) {
+               if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+                       goto exit;
+
+               /*
+                * Here we only control the SVC.
+                *
+                * In FW_FLASHING mode we do not want to control the APBs,
+                * because on V2 hardware the SPI bus is shared between both
+                * APBs. So let the user choose which APB to flash.
+                */
+               arche_platform_poweroff_seq(arche_pdata);
+
+               ret = arche_platform_fw_flashing_seq(arche_pdata);
+               if (ret)
+                       goto exit;
+       } else {
+               dev_err(arche_pdata->dev, "unknown state\n");
+               ret = -EINVAL;
+       }
+
+exit:
+       mutex_unlock(&arche_pdata->platform_state_mutex);
+       return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
+
+       switch (arche_pdata->state) {
+       case ARCHE_PLATFORM_STATE_OFF:
+               return sprintf(buf, "off\n");
+       case ARCHE_PLATFORM_STATE_ACTIVE:
+               return sprintf(buf, "active\n");
+       case ARCHE_PLATFORM_STATE_STANDBY:
+               return sprintf(buf, "standby\n");
+       case ARCHE_PLATFORM_STATE_FW_FLASHING:
+               return sprintf(buf, "fw_flashing\n");
+       case ARCHE_PLATFORM_STATE_TIME_SYNC:
+               return sprintf(buf, "time_sync\n");
+       default:
+               return sprintf(buf, "unknown state\n");
+       }
+}
+
+static DEVICE_ATTR_RW(state);
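From user space the resulting "state" attribute behaves like any other sysfs file. A minimal sketch of driving it; the sysfs path is an assumption (it depends on how the platform device is named by the device tree):

#include <stdio.h>

int main(void)
{
	/* Path is illustrative only; adjust to the actual platform device. */
	FILE *f = fopen("/sys/devices/platform/arche-platform/state", "w");

	if (!f)
		return 1;

	/* Accepted values: "off", "active", "standby", "fw_flashing". */
	fputs("active", f);

	return fclose(f) ? 1 : 0;
}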
+
+static int arche_platform_pm_notifier(struct notifier_block *notifier,
+                                     unsigned long pm_event, void *unused)
+{
+       struct arche_platform_drvdata *arche_pdata =
+               container_of(notifier, struct arche_platform_drvdata,
+                            pm_notifier);
+       int ret = NOTIFY_DONE;
+
+       mutex_lock(&arche_pdata->platform_state_mutex);
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
+                       ret = NOTIFY_STOP;
+                       break;
+               }
+               device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+               arche_platform_poweroff_seq(arche_pdata);
+               break;
+       case PM_POST_SUSPEND:
+               if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
+                       break;
+
+               arche_platform_wd_irq_en(arche_pdata);
+               arche_platform_coldboot_seq(arche_pdata);
+               break;
+       default:
+               break;
+       }
+       mutex_unlock(&arche_pdata->platform_state_mutex);
+
+       return ret;
+}
+
+static int arche_platform_probe(struct platform_device *pdev)
+{
+       struct arche_platform_drvdata *arche_pdata;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       int ret;
+
+       arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata), GFP_KERNEL);
+       if (!arche_pdata)
+               return -ENOMEM;
+
+       /* setup svc reset gpio */
+       arche_pdata->is_reset_act_hi = of_property_read_bool(np,
+                                       "svc,reset-active-high");
+       arche_pdata->svc_reset_gpio = of_get_named_gpio(np, "svc,reset-gpio", 0);
+       if (arche_pdata->svc_reset_gpio < 0) {
+               dev_err(dev, "failed to get reset-gpio\n");
+               return arche_pdata->svc_reset_gpio;
+       }
+       ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
+       if (ret) {
+               dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
+               return ret;
+       }
+       ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
+                                       arche_pdata->is_reset_act_hi);
+       if (ret) {
+               dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
+               return ret;
+       }
+       arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+
+       arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
+                                       "svc,sysboot-gpio", 0);
+       if (arche_pdata->svc_sysboot_gpio < 0) {
+               dev_err(dev, "failed to get sysboot gpio\n");
+               return arche_pdata->svc_sysboot_gpio;
+       }
+       ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
+       if (ret) {
+               dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
+               return ret;
+       }
+       ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
+       if (ret) {
+               dev_err(dev, "failed to set sysboot gpio dir:%d\n", ret);
+               return ret;
+       }
+
+       /* setup the clock request gpio first */
+       arche_pdata->svc_refclk_req = of_get_named_gpio(np,
+                                       "svc,refclk-req-gpio", 0);
+       if (arche_pdata->svc_refclk_req < 0) {
+               dev_err(dev, "failed to get svc clock-req gpio\n");
+               return arche_pdata->svc_refclk_req;
+       }
+       ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req, "svc-clk-req");
+       if (ret) {
+               dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
+               return ret;
+       }
+       ret = gpio_direction_input(arche_pdata->svc_refclk_req);
+       if (ret) {
+               dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
+               return ret;
+       }
+
+       /* setup refclk2 to follow the pin */
+       arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
+       if (IS_ERR(arche_pdata->svc_ref_clk)) {
+               ret = PTR_ERR(arche_pdata->svc_ref_clk);
+               dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, arche_pdata);
+
+       arche_pdata->num_apbs = of_get_child_count(np);
+       dev_dbg(dev, "Number of APBs available: %d\n", arche_pdata->num_apbs);
+
+       arche_pdata->wake_detect_gpio = of_get_named_gpio(np, "svc,wake-detect-gpio", 0);
+       if (arche_pdata->wake_detect_gpio < 0) {
+               dev_err(dev, "failed to get wake detect gpio\n");
+               ret = arche_pdata->wake_detect_gpio;
+               return ret;
+       }
+
+       ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio, "wake detect");
+       if (ret) {
+               dev_err(dev, "Failed requesting wake_detect gpio %d\n",
+                               arche_pdata->wake_detect_gpio);
+               return ret;
+       }
+
+       arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+
+       arche_pdata->dev = &pdev->dev;
+
+       spin_lock_init(&arche_pdata->wake_lock);
+       mutex_init(&arche_pdata->platform_state_mutex);
+       init_waitqueue_head(&arche_pdata->wq);
+       arche_pdata->wake_detect_irq =
+               gpio_to_irq(arche_pdata->wake_detect_gpio);
+
+       ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
+                       arche_platform_wd_irq,
+                       arche_platform_wd_irq_thread,
+                       IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                       dev_name(dev), arche_pdata);
+       if (ret) {
+               dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
+               return ret;
+       }
+       disable_irq(arche_pdata->wake_detect_irq);
+
+       ret = device_create_file(dev, &dev_attr_state);
+       if (ret) {
+               dev_err(dev, "failed to create state file in sysfs\n");
+               return ret;
+       }
+
+       ret = of_platform_populate(np, NULL, NULL, dev);
+       if (ret) {
+               dev_err(dev, "failed to populate child nodes %d\n", ret);
+               goto err_device_remove;
+       }
+
+       arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
+       ret = register_pm_notifier(&arche_pdata->pm_notifier);
+
+       if (ret) {
+               dev_err(dev, "failed to register pm notifier %d\n", ret);
+               goto err_device_remove;
+       }
+
+       /* Register callback pointer */
+       arche_platform_change_state_cb = arche_platform_change_state;
+
+       /* Cold boot the SVC unless explicitly asked to stay powered off */
+       if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
+               mutex_lock(&arche_pdata->platform_state_mutex);
+               ret = arche_platform_coldboot_seq(arche_pdata);
+               if (ret) {
+                       dev_err(dev, "Failed to cold boot svc %d\n", ret);
+                       goto err_coldboot;
+               }
+               arche_platform_wd_irq_en(arche_pdata);
+               mutex_unlock(&arche_pdata->platform_state_mutex);
+       }
+
+       dev_info(dev, "Device registered successfully\n");
+       return 0;
+
+err_coldboot:
+       mutex_unlock(&arche_pdata->platform_state_mutex);
+err_device_remove:
+       device_remove_file(&pdev->dev, &dev_attr_state);
+       return ret;
+}
+
+static int arche_remove_child(struct device *dev, void *unused)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+
+       platform_device_unregister(pdev);
+
+       return 0;
+}
+
+static int arche_platform_remove(struct platform_device *pdev)
+{
+       struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+       unregister_pm_notifier(&arche_pdata->pm_notifier);
+       device_remove_file(&pdev->dev, &dev_attr_state);
+       device_for_each_child(&pdev->dev, NULL, arche_remove_child);
+       arche_platform_poweroff_seq(arche_pdata);
+       platform_set_drvdata(pdev, NULL);
+
+       if (usb3613_hub_mode_ctrl(false))
+               dev_warn(arche_pdata->dev, "failed to control hub device\n");
+       /* TODO: Should we do anything more here? */
+       return 0;
+}
+
+static int arche_platform_suspend(struct device *dev)
+{
+       /*
+        * If the timing profile permits, we may shut down the bridge
+        * completely.
+        *
+        * TODO: sequence?
+        *
+        * Also, we need to make sure we meet the precondition for unipro
+        * suspend.
+        * Precondition: definition?
+        */
+       return 0;
+}
+
+static int arche_platform_resume(struct device *dev)
+{
+       /*
+        * At least for ES2 we have to meet the delay requirement between
+        * unipro switch and AP bridge init, depending on whether the bridge
+        * is in the OFF state or the standby state.
+        *
+        * Based on whether the bridge is in standby or OFF state we may have
+        * to assert multiple signals. Please refer to the WDM spec for more
+        * info.
+        *
+        */
+       return 0;
+}
+
+static void arche_platform_shutdown(struct platform_device *pdev)
+{
+       struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+       arche_platform_poweroff_seq(arche_pdata);
+
+       usb3613_hub_mode_ctrl(false);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
+                       arche_platform_suspend,
+                       arche_platform_resume);
+
+static const struct of_device_id arche_platform_of_match[] = {
+       { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+       { },
+};
+
+static const struct of_device_id arche_combined_id[] = {
+       { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+       { .compatible = "usbffff,2", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, arche_combined_id);
+
+static struct platform_driver arche_platform_device_driver = {
+       .probe          = arche_platform_probe,
+       .remove         = arche_platform_remove,
+       .shutdown       = arche_platform_shutdown,
+       .driver         = {
+               .name   = "arche-platform-ctrl",
+               .pm     = &arche_platform_pm_ops,
+               .of_match_table = arche_platform_of_match,
+       }
+};
+
+static int __init arche_init(void)
+{
+       int retval;
+
+       retval = platform_driver_register(&arche_platform_device_driver);
+       if (retval)
+               return retval;
+
+       retval = arche_apb_init();
+       if (retval)
+               platform_driver_unregister(&arche_platform_device_driver);
+
+       return retval;
+}
+module_init(arche_init);
+
+static void __exit arche_exit(void)
+{
+       arche_apb_exit();
+       platform_driver_unregister(&arche_platform_device_driver);
+}
+module_exit(arche_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
+MODULE_DESCRIPTION("Arche Platform Driver");
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
new file mode 100644 (file)
index 0000000..bd12345
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __ARCHE_PLATFORM_H
+#define __ARCHE_PLATFORM_H
+
+#include "timesync.h"
+
+enum arche_platform_state {
+       ARCHE_PLATFORM_STATE_OFF,
+       ARCHE_PLATFORM_STATE_ACTIVE,
+       ARCHE_PLATFORM_STATE_STANDBY,
+       ARCHE_PLATFORM_STATE_FW_FLASHING,
+       ARCHE_PLATFORM_STATE_TIME_SYNC,
+};
+
+int arche_platform_change_state(enum arche_platform_state state,
+                               struct gb_timesync_svc *pdata);
+
+extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
+                                            struct gb_timesync_svc *pdata);
+int __init arche_apb_init(void);
+void __exit arche_apb_exit(void);
+
+/* Operational states for the APB device */
+int apb_ctrl_coldboot(struct device *dev);
+int apb_ctrl_fw_flashing(struct device *dev);
+int apb_ctrl_standby_boot(struct device *dev);
+void apb_ctrl_poweroff(struct device *dev);
+void apb_bootret_assert(struct device *dev);
+void apb_bootret_deassert(struct device *dev);
+
+#endif /* __ARCHE_PLATFORM_H */
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
new file mode 100644 (file)
index 0000000..7fbddfc
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARPC_H
+#define __ARPC_H
+
+/* APBridgeA RPC (ARPC) */
+
+enum arpc_result {
+       ARPC_SUCCESS            = 0x00,
+       ARPC_NO_MEMORY          = 0x01,
+       ARPC_INVALID            = 0x02,
+       ARPC_TIMEOUT            = 0x03,
+       ARPC_UNKNOWN_ERROR      = 0xff,
+};
+
+struct arpc_request_message {
+       __le16  id;             /* RPC unique id */
+       __le16  size;           /* Size in bytes of header + payload */
+       __u8    type;           /* RPC type */
+       __u8    data[0];        /* ARPC data */
+} __packed;
+
+struct arpc_response_message {
+       __le16  id;             /* RPC unique id */
+       __u8    result;         /* Result of RPC */
+} __packed;
+
+
+/* ARPC requests */
+#define ARPC_TYPE_CPORT_CONNECTED              0x01
+#define ARPC_TYPE_CPORT_QUIESCE                        0x02
+#define ARPC_TYPE_CPORT_CLEAR                  0x03
+#define ARPC_TYPE_CPORT_FLUSH                  0x04
+#define ARPC_TYPE_CPORT_SHUTDOWN               0x05
+
+struct arpc_cport_connected_req {
+       __le16 cport_id;
+} __packed;
+
+struct arpc_cport_quiesce_req {
+       __le16 cport_id;
+       __le16 peer_space;
+       __le16 timeout;
+} __packed;
+
+struct arpc_cport_clear_req {
+       __le16 cport_id;
+} __packed;
+
+struct arpc_cport_flush_req {
+       __le16 cport_id;
+} __packed;
+
+struct arpc_cport_shutdown_req {
+       __le16 cport_id;
+       __le16 timeout;
+       __u8 phase;
+} __packed;
+
+#endif /* __ARPC_H */
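To illustrate the wire format above, a sketch of how the AP side might assemble an ARPC_TYPE_CPORT_CONNECTED request; the helper name is hypothetical, and id allocation and the actual USB transfer are outside the scope of this header:

#include <linux/slab.h>
#include <linux/types.h>
#include "arpc.h"

static struct arpc_request_message *example_build_cport_connected(u16 rpc_id,
								  u16 cport_id)
{
	struct arpc_cport_connected_req *payload;
	struct arpc_request_message *req;
	size_t size = sizeof(*req) + sizeof(*payload);

	req = kzalloc(size, GFP_KERNEL);
	if (!req)
		return NULL;

	req->id = cpu_to_le16(rpc_id);
	req->size = cpu_to_le16(size);	/* header + payload, in bytes */
	req->type = ARPC_TYPE_CPORT_CONNECTED;

	payload = (struct arpc_cport_connected_req *)req->data;
	payload->cport_id = cpu_to_le16(cport_id);

	return req;	/* caller frees with kfree() after sending */
}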
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
new file mode 100644 (file)
index 0000000..1b4252d
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+#include "audio_apbridgea.h"
+#include "audio_codec.h"
+
+int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+                                 __u16 i2s_port, __u32 format, __u32 rate,
+                                 __u32 mclk_freq)
+{
+       struct audio_apbridgea_set_config_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.format = cpu_to_le32(format);
+       req.rate = cpu_to_le32(rate);
+       req.mclk_freq = cpu_to_le32(mclk_freq);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
+
+int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+                                     __u16 i2s_port, __u16 cportid,
+                                     __u8 direction)
+{
+       struct audio_apbridgea_register_cport_request req;
+       int ret;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.cport = cpu_to_le16(cportid);
+       req.direction = direction;
+
+       ret = gb_pm_runtime_get_sync(connection->bundle);
+       if (ret)
+               return ret;
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
+
+int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+                                       __u16 i2s_port, __u16 cportid,
+                                       __u8 direction)
+{
+       struct audio_apbridgea_unregister_cport_request req;
+       int ret;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.cport = cpu_to_le16(cportid);
+       req.direction = direction;
+
+       ret = gb_hd_output(connection->hd, &req, sizeof(req),
+                          GB_APB_REQUEST_AUDIO_CONTROL, true);
+
+       gb_pm_runtime_put_autosuspend(connection->bundle);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
+
+int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+                                       __u16 i2s_port, __u16 size)
+{
+       struct audio_apbridgea_set_tx_data_size_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.size = cpu_to_le16(size);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
+
+int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+                                 __u16 i2s_port)
+{
+       struct audio_apbridgea_prepare_tx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
+
+int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+                               __u16 i2s_port, __u64 timestamp)
+{
+       struct audio_apbridgea_start_tx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.timestamp = cpu_to_le64(timestamp);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
+
+int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
+{
+       struct audio_apbridgea_stop_tx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
+
+int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+                                  __u16 i2s_port)
+{
+       struct audio_apbridgea_shutdown_tx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
+
+int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+                                       __u16 i2s_port, __u16 size)
+{
+       struct audio_apbridgea_set_rx_data_size_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+       req.size = cpu_to_le16(size);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
+
+int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+                                 __u16 i2s_port)
+{
+       struct audio_apbridgea_prepare_rx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
+
+int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+                               __u16 i2s_port)
+{
+       struct audio_apbridgea_start_rx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
+
+int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
+{
+       struct audio_apbridgea_stop_rx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
+
+int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+                                  __u16 i2s_port)
+{
+       struct audio_apbridgea_shutdown_rx_request req;
+
+       req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
+       req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+       return gb_hd_output(connection->hd, &req, sizeof(req),
+                           GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-apbridgea");
+MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
new file mode 100644 (file)
index 0000000..b94cb05
--- /dev/null
@@ -0,0 +1,156 @@
+/**
+ * Copyright (c) 2015-2016 Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * This is a special protocol for configuring communication over the
+ * I2S bus between the DSP on the MSM8994 and APBridgeA.  Therefore,
+ * we can predefine several low-level attributes of the communication
+ * because we know that they are supported.  In particular, the following
+ * assumptions are made:
+ *     - there are two channels (i.e., stereo)
+ *     - the low-level protocol is I2S as defined by Philips/NXP
+ *     - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
+ *     - WCLK changes on the falling edge of BCLK
+ *     - WCLK low for left channel; high for right channel
+ *     - TX data is sent on the falling edge of BCLK
+ *     - RX data is received/latched on the rising edge of BCLK
+ */
+
+#ifndef __AUDIO_APBRIDGEA_H
+#define __AUDIO_APBRIDGEA_H
+
+#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG                        0x01
+#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT            0x02
+#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT          0x03
+#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE          0x04
+                                                       /* 0x05 unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX                        0x06
+#define AUDIO_APBRIDGEA_TYPE_START_TX                  0x07
+#define AUDIO_APBRIDGEA_TYPE_STOP_TX                   0x08
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX               0x09
+#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE          0x0a
+                                                       /* 0x0b unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX                        0x0c
+#define AUDIO_APBRIDGEA_TYPE_START_RX                  0x0d
+#define AUDIO_APBRIDGEA_TYPE_STOP_RX                   0x0e
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX               0x0f
+
+#define AUDIO_APBRIDGEA_PCM_FMT_8                      BIT(0)
+#define AUDIO_APBRIDGEA_PCM_FMT_16                     BIT(1)
+#define AUDIO_APBRIDGEA_PCM_FMT_24                     BIT(2)
+#define AUDIO_APBRIDGEA_PCM_FMT_32                     BIT(3)
+#define AUDIO_APBRIDGEA_PCM_FMT_64                     BIT(4)
+
+#define AUDIO_APBRIDGEA_PCM_RATE_5512                  BIT(0)
+#define AUDIO_APBRIDGEA_PCM_RATE_8000                  BIT(1)
+#define AUDIO_APBRIDGEA_PCM_RATE_11025                 BIT(2)
+#define AUDIO_APBRIDGEA_PCM_RATE_16000                 BIT(3)
+#define AUDIO_APBRIDGEA_PCM_RATE_22050                 BIT(4)
+#define AUDIO_APBRIDGEA_PCM_RATE_32000                 BIT(5)
+#define AUDIO_APBRIDGEA_PCM_RATE_44100                 BIT(6)
+#define AUDIO_APBRIDGEA_PCM_RATE_48000                 BIT(7)
+#define AUDIO_APBRIDGEA_PCM_RATE_64000                 BIT(8)
+#define AUDIO_APBRIDGEA_PCM_RATE_88200                 BIT(9)
+#define AUDIO_APBRIDGEA_PCM_RATE_96000                 BIT(10)
+#define AUDIO_APBRIDGEA_PCM_RATE_176400                        BIT(11)
+#define AUDIO_APBRIDGEA_PCM_RATE_192000                        BIT(12)
+
+#define AUDIO_APBRIDGEA_DIRECTION_TX                   BIT(0)
+#define AUDIO_APBRIDGEA_DIRECTION_RX                   BIT(1)
+
+/* The I2S port is passed in the 'index' parameter of the USB request */
+/* The CPort is passed in the 'value' parameter of the USB request */
+
+struct audio_apbridgea_hdr {
+       __u8    type;
+       __le16  i2s_port;
+       __u8    data[0];
+} __packed;
+
+struct audio_apbridgea_set_config_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le32                          format; /* AUDIO_APBRIDGEA_PCM_FMT_* */
+       __le32                          rate;   /* AUDIO_APBRIDGEA_PCM_RATE_* */
+       __le32                          mclk_freq; /* XXX Remove? */
+} __packed;
+
+struct audio_apbridgea_register_cport_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le16                          cport;
+       __u8                            direction;
+} __packed;
+
+struct audio_apbridgea_unregister_cport_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le16                          cport;
+       __u8                            direction;
+} __packed;
+
+struct audio_apbridgea_set_tx_data_size_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le16                          size;
+} __packed;
+
+struct audio_apbridgea_prepare_tx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_start_tx_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le64                          timestamp;
+} __packed;
+
+struct audio_apbridgea_stop_tx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_tx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_set_rx_data_size_request {
+       struct audio_apbridgea_hdr      hdr;
+       __le16                          size;
+} __packed;
+
+struct audio_apbridgea_prepare_rx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_start_rx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_stop_rx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_rx_request {
+       struct audio_apbridgea_hdr      hdr;
+} __packed;
+
+#endif /*__AUDIO_APBRIDGEA_H */
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
new file mode 100644 (file)
index 0000000..2f70295
--- /dev/null
@@ -0,0 +1,1132 @@
+/*
+ * APBridge ALSA SoC dummy codec driver
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <uapi/linux/input.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+
+static struct gbaudio_codec_info *gbcodec;
+
+static struct gbaudio_data_connection *
+find_data(struct gbaudio_module_info *module, int id)
+{
+       struct gbaudio_data_connection *data;
+
+       list_for_each_entry(data, &module->data_list, list) {
+               if (id == data->id)
+                       return data;
+       }
+       return NULL;
+}
+
+static struct gbaudio_stream_params *
+find_dai_stream_params(struct gbaudio_codec_info *codec, int id, int stream)
+{
+       struct gbaudio_codec_dai *dai;
+
+       list_for_each_entry(dai, &codec->dai_list, list) {
+               if (dai->id == id)
+                       return &dai->params[stream];
+       }
+       return NULL;
+}
+
+static int gbaudio_module_enable_tx(struct gbaudio_codec_info *codec,
+                                   struct gbaudio_module_info *module, int id)
+{
+       int module_state, ret = 0;
+       uint16_t data_cport, i2s_port, cportid;
+       uint8_t sig_bits, channels;
+       uint32_t format, rate;
+       struct gbaudio_data_connection *data;
+       struct gbaudio_stream_params *params;
+
+       /* find the dai */
+       data = find_data(module, id);
+       if (!data) {
+               dev_err(module->dev, "%d:DATA connection missing\n", id);
+               return -ENODEV;
+       }
+       module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+       params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_PLAYBACK);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               return -EINVAL;
+       }
+
+       /* register cport */
+       if (module_state < GBAUDIO_CODEC_STARTUP) {
+               i2s_port = 0;   /* fixed for now */
+               cportid = data->connection->hd_cport_id;
+               ret = gb_audio_apbridgea_register_cport(data->connection,
+                                               i2s_port, cportid,
+                                               AUDIO_APBRIDGEA_DIRECTION_TX);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "reg_cport failed:%d\n", ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+                       GBAUDIO_CODEC_STARTUP;
+               dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+       }
+
+       /* hw_params */
+       if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+               format = params->format;
+               channels = params->channels;
+               rate = params->rate;
+               sig_bits = params->sig_bits;
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+                                         format, rate, channels, sig_bits);
+               if (ret) {
+                       dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+                       GBAUDIO_CODEC_HWPARAMS;
+               dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+       }
+
+       /* prepare */
+       if (module_state < GBAUDIO_CODEC_PREPARE) {
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_set_tx_data_size(module->mgmt_connection,
+                                                  data_cport, 192);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "set_tx_data_size failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               ret = gb_audio_gb_activate_tx(module->mgmt_connection,
+                                             data_cport);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "activate_tx failed:%d\n", ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+                       GBAUDIO_CODEC_PREPARE;
+               dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+       }
+
+       return 0;
+}
+
+static int gbaudio_module_disable_tx(struct gbaudio_module_info *module, int id)
+{
+       int ret;
+       uint16_t data_cport, cportid, i2s_port;
+       int module_state;
+       struct gbaudio_data_connection *data;
+
+       /* find the dai */
+       data = find_data(module, id);
+       if (!data) {
+               dev_err(module->dev, "%d:DATA connection missing\n", id);
+               return -ENODEV;
+       }
+       module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+       if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_deactivate_tx(module->mgmt_connection,
+                                               data_cport);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "deactivate_tx failed:%d\n", ret);
+                       return ret;
+               }
+               dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+               data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+                       GBAUDIO_CODEC_HWPARAMS;
+       }
+
+       if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+               i2s_port = 0;   /* fixed for now */
+               cportid = data->connection->hd_cport_id;
+               ret = gb_audio_apbridgea_unregister_cport(data->connection,
+                                               i2s_port, cportid,
+                                               AUDIO_APBRIDGEA_DIRECTION_TX);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "unregister_cport failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+               data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+                       GBAUDIO_CODEC_SHUTDOWN;
+       }
+
+       return 0;
+}
+
+static int gbaudio_module_enable_rx(struct gbaudio_codec_info *codec,
+                                   struct gbaudio_module_info *module, int id)
+{
+       int module_state, ret = 0;
+       uint16_t data_cport, i2s_port, cportid;
+       uint8_t sig_bits, channels;
+       uint32_t format, rate;
+       struct gbaudio_data_connection *data;
+       struct gbaudio_stream_params *params;
+
+       /* find the dai */
+       data = find_data(module, id);
+       if (!data) {
+               dev_err(module->dev, "%d:DATA connection missing\n", id);
+               return -ENODEV;
+       }
+       module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+       params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_CAPTURE);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               return -EINVAL;
+       }
+
+       /* register cport */
+       if (module_state < GBAUDIO_CODEC_STARTUP) {
+               i2s_port = 0;   /* fixed for now */
+               cportid = data->connection->hd_cport_id;
+               ret = gb_audio_apbridgea_register_cport(data->connection,
+                                               i2s_port, cportid,
+                                               AUDIO_APBRIDGEA_DIRECTION_RX);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "reg_cport failed:%d\n", ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_CAPTURE] =
+                       GBAUDIO_CODEC_STARTUP;
+               dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+       }
+
+       /* hw_params */
+       if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+               format = params->format;
+               channels = params->channels;
+               rate = params->rate;
+               sig_bits = params->sig_bits;
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+                                         format, rate, channels, sig_bits);
+               if (ret) {
+                       dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_CAPTURE] =
+                       GBAUDIO_CODEC_HWPARAMS;
+               dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+       }
+
+       /* prepare */
+       if (module_state < GBAUDIO_CODEC_PREPARE) {
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_set_rx_data_size(module->mgmt_connection,
+                                                  data_cport, 192);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "set_rx_data_size failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               ret = gb_audio_gb_activate_rx(module->mgmt_connection,
+                                             data_cport);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "activate_rx failed:%d\n", ret);
+                       return ret;
+               }
+               data->state[SNDRV_PCM_STREAM_CAPTURE] =
+                       GBAUDIO_CODEC_PREPARE;
+               dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+       }
+
+       return 0;
+}
+
+static int gbaudio_module_disable_rx(struct gbaudio_module_info *module, int id)
+{
+       int ret;
+       uint16_t data_cport, cportid, i2s_port;
+       int module_state;
+       struct gbaudio_data_connection *data;
+
+       /* find the dai */
+       data = find_data(module, id);
+       if (!data) {
+               dev_err(module->dev, "%d:DATA connection missing\n", id);
+               return -ENODEV;
+       }
+       module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+       if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+               data_cport = data->connection->intf_cport_id;
+               ret = gb_audio_gb_deactivate_rx(module->mgmt_connection,
+                                               data_cport);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "deactivate_rx failed:%d\n", ret);
+                       return ret;
+               }
+               dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+               data->state[SNDRV_PCM_STREAM_CAPTURE] =
+                       GBAUDIO_CODEC_HWPARAMS;
+       }
+
+       if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+               i2s_port = 0;   /* fixed for now */
+               cportid = data->connection->hd_cport_id;
+               ret = gb_audio_apbridgea_unregister_cport(data->connection,
+                                               i2s_port, cportid,
+                                               AUDIO_APBRIDGEA_DIRECTION_RX);
+               if (ret) {
+                       dev_err_ratelimited(module->dev,
+                                           "unregister_cport failed:%d\n",
+                                           ret);
+                       return ret;
+               }
+               dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+               data->state[SNDRV_PCM_STREAM_CAPTURE] =
+                       GBAUDIO_CODEC_SHUTDOWN;
+       }
+
+       return 0;
+}
+
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+                         struct snd_soc_dapm_widget *w,
+                         struct gbaudio_module_info *module, int enable)
+{
+       int dai_id, ret;
+       char intf_name[NAME_SIZE], dir[NAME_SIZE];
+
+       dev_dbg(module->dev, "%s:Module update %s sequence\n", w->name,
+               enable ? "Enable":"Disable");
+
+       if ((w->id != snd_soc_dapm_aif_in) && (w->id != snd_soc_dapm_aif_out)) {
+               dev_dbg(codec->dev, "No action required for %s\n", w->name);
+               return 0;
+       }
+
+       /* parse dai_id from AIF widget's stream_name */
+       ret = sscanf(w->sname, "%31s %d %31s", intf_name, &dai_id, dir);
+       if (ret < 3) {
+               dev_err(codec->dev, "Error while parsing dai_id for %s\n",
+                       w->name);
+               return -EINVAL;
+       }
+
+       mutex_lock(&codec->lock);
+       if (w->id == snd_soc_dapm_aif_in) {
+               if (enable)
+                       ret = gbaudio_module_enable_tx(codec, module, dai_id);
+               else
+                       ret = gbaudio_module_disable_tx(module, dai_id);
+       } else if (w->id == snd_soc_dapm_aif_out) {
+               if (enable)
+                       ret = gbaudio_module_enable_rx(codec, module, dai_id);
+               else
+                       ret = gbaudio_module_disable_rx(module, dai_id);
+       }
+
+       mutex_unlock(&codec->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(gbaudio_module_update);
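As a usage illustration only (not part of this patch), the sketch below shows how a DAPM widget event handler could forward power events to gbaudio_module_update(); gbaudio_find_module() is a hypothetical lookup helper.

static int gb_aif_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct gbaudio_codec_info *info = snd_soc_codec_get_drvdata(codec);
	/* hypothetical helper: resolve the module that owns this widget */
	struct gbaudio_module_info *module = gbaudio_find_module(info, w);

	if (!module)
		return -ENODEV;

	/* enable the stream on power-up events, disable on power-down */
	return gbaudio_module_update(info, w, module,
				     SND_SOC_DAPM_EVENT_ON(event));
}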
+
+/*
+ * codec DAI ops
+ */
+static int gbcodec_startup(struct snd_pcm_substream *substream,
+                          struct snd_soc_dai *dai)
+{
+       struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+       struct gbaudio_stream_params *params;
+
+       mutex_lock(&codec->lock);
+
+       if (list_empty(&codec->module_list)) {
+               dev_err(codec->dev, "No codec module available\n");
+               mutex_unlock(&codec->lock);
+               return -ENODEV;
+       }
+
+       params = find_dai_stream_params(codec, dai->id, substream->stream);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+       params->state = GBAUDIO_CODEC_STARTUP;
+       mutex_unlock(&codec->lock);
+       /* to prevent suspend in case of active audio */
+       pm_stay_awake(dai->dev);
+
+       return 0;
+}
+
+static void gbcodec_shutdown(struct snd_pcm_substream *substream,
+                            struct snd_soc_dai *dai)
+{
+       struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+       struct gbaudio_stream_params *params;
+
+       mutex_lock(&codec->lock);
+
+       if (list_empty(&codec->module_list))
+               dev_info(codec->dev, "No codec module available during shutdown\n");
+
+       params = find_dai_stream_params(codec, dai->id, substream->stream);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               mutex_unlock(&codec->lock);
+               return;
+       }
+       params->state = GBAUDIO_CODEC_SHUTDOWN;
+       mutex_unlock(&codec->lock);
+       pm_relax(dai->dev);
+}
+
+static int gbcodec_hw_params(struct snd_pcm_substream *substream,
+                            struct snd_pcm_hw_params *hwparams,
+                            struct snd_soc_dai *dai)
+{
+       int ret;
+       uint8_t sig_bits, channels;
+       uint32_t format, rate;
+       struct gbaudio_module_info *module;
+       struct gbaudio_data_connection *data;
+       struct gb_bundle *bundle;
+       struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+       struct gbaudio_stream_params *params;
+
+       mutex_lock(&codec->lock);
+
+       if (list_empty(&codec->module_list)) {
+               dev_err(codec->dev, "No codec module available\n");
+               mutex_unlock(&codec->lock);
+               return -ENODEV;
+       }
+
+       /*
+        * Currently only 48000 Hz, S16_LE, stereo is supported;
+        * validate params before configuring the codec.
+        */
+       if (params_channels(hwparams) != 2) {
+               dev_err(dai->dev, "Invalid channel count:%d\n",
+                       params_channels(hwparams));
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+       channels = params_channels(hwparams);
+
+       if (params_rate(hwparams) != 48000) {
+               dev_err(dai->dev, "Invalid sampling rate:%d\n",
+                       params_rate(hwparams));
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+       rate = GB_AUDIO_PCM_RATE_48000;
+
+       if (params_format(hwparams) != SNDRV_PCM_FORMAT_S16_LE) {
+               dev_err(dai->dev, "Invalid format:%d\n",
+                       params_format(hwparams));
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+       format = GB_AUDIO_PCM_FMT_S16_LE;
+       /* S16_LE carries 16 significant bits per sample */
+       sig_bits = 16;
+
+       /* find the data connection */
+       list_for_each_entry(module, &codec->module_list, list) {
+               data = find_data(module, dai->id);
+               if (data)
+                       break;
+       }
+
+       if (!data) {
+               dev_err(dai->dev, "DATA connection missing\n");
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+
+       params = find_dai_stream_params(codec, dai->id, substream->stream);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+
+       bundle = to_gb_bundle(module->dev);
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret) {
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       ret = gb_audio_apbridgea_set_config(data->connection, 0,
+                                           AUDIO_APBRIDGEA_PCM_FMT_16,
+                                           AUDIO_APBRIDGEA_PCM_RATE_48000,
+                                           6144000);
+       if (ret) {
+               dev_err_ratelimited(dai->dev, "set_config failed: %d\n", ret);
+               gb_pm_runtime_put_noidle(bundle);
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       gb_pm_runtime_put_noidle(bundle);
+
+       params->state = GBAUDIO_CODEC_HWPARAMS;
+       params->format = format;
+       params->rate = rate;
+       params->channels = channels;
+       params->sig_bits = sig_bits;
+
+       mutex_unlock(&codec->lock);
+       return 0;
+}
+
+static int gbcodec_prepare(struct snd_pcm_substream *substream,
+                          struct snd_soc_dai *dai)
+{
+       int ret;
+       struct gbaudio_module_info *module;
+       struct gbaudio_data_connection *data;
+       struct gb_bundle *bundle;
+       struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+       struct gbaudio_stream_params *params;
+
+       mutex_lock(&codec->lock);
+
+       if (list_empty(&codec->module_list)) {
+               dev_err(codec->dev, "No codec module available\n");
+               mutex_unlock(&codec->lock);
+               return -ENODEV;
+       }
+
+       list_for_each_entry(module, &codec->module_list, list) {
+               /* find the dai */
+               data = find_data(module, dai->id);
+               if (data)
+                       break;
+       }
+       if (!data) {
+               dev_err(dai->dev, "DATA connection missing\n");
+               mutex_unlock(&codec->lock);
+               return -ENODEV;
+       }
+
+       params = find_dai_stream_params(codec, dai->id, substream->stream);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+
+       bundle = to_gb_bundle(module->dev);
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret) {
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       switch (substream->stream) {
+       case SNDRV_PCM_STREAM_PLAYBACK:
+               ret = gb_audio_apbridgea_set_tx_data_size(data->connection, 0,
+                                                         192);
+               break;
+       case SNDRV_PCM_STREAM_CAPTURE:
+               ret = gb_audio_apbridgea_set_rx_data_size(data->connection, 0,
+                                                         192);
+               break;
+       }
+       if (ret) {
+               dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
+                                   ret);
+               gb_pm_runtime_put_noidle(bundle);
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       gb_pm_runtime_put_noidle(bundle);
+
+       params->state = GBAUDIO_CODEC_PREPARE;
+       mutex_unlock(&codec->lock);
+       return 0;
+}
+
+static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+       int ret;
+       struct gbaudio_data_connection *data;
+       struct gbaudio_module_info *module;
+       struct gb_bundle *bundle;
+       struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+       struct gbaudio_stream_params *params;
+
+       dev_dbg(dai->dev, "Mute:%d, Direction:%s\n", mute,
+               stream ? "CAPTURE":"PLAYBACK");
+
+       mutex_lock(&codec->lock);
+
+       params = find_dai_stream_params(codec, dai->id, stream);
+       if (!params) {
+               dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+               mutex_unlock(&codec->lock);
+               return -EINVAL;
+       }
+
+       if (list_empty(&codec->module_list)) {
+               dev_err(codec->dev, "No codec module available\n");
+               if (mute) {
+                       params->state = GBAUDIO_CODEC_STOP;
+                       ret = 0;
+               } else {
+                       ret = -ENODEV;
+               }
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       list_for_each_entry(module, &codec->module_list, list) {
+               /* find the dai */
+               data = find_data(module, dai->id);
+               if (data)
+                       break;
+       }
+       if (!data) {
+               dev_err(dai->dev, "%s: DATA connection missing\n",
+                       dai->name);
+               mutex_unlock(&codec->lock);
+               return -ENODEV;
+       }
+
+       bundle = to_gb_bundle(module->dev);
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret) {
+               mutex_unlock(&codec->lock);
+               return ret;
+       }
+
+       if (!mute && !stream) {/* start playback */
+               ret = gb_audio_apbridgea_prepare_tx(data->connection,
+                                                   0);
+               if (!ret)
+                       ret = gb_audio_apbridgea_start_tx(data->connection,
+                                                         0, 0);
+               params->state = GBAUDIO_CODEC_START;
+       } else if (!mute && stream) {/* start capture */
+               ret = gb_audio_apbridgea_prepare_rx(data->connection,
+                                                   0);
+               if (!ret)
+                       ret = gb_audio_apbridgea_start_rx(data->connection,
+                                                         0);
+               params->state = GBAUDIO_CODEC_START;
+       } else if (mute && !stream) {/* stop playback */
+               ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+               if (!ret)
+                       ret = gb_audio_apbridgea_shutdown_tx(data->connection,
+                                                            0);
+               params->state = GBAUDIO_CODEC_STOP;
+       } else if (mute && stream) {/* stop capture */
+               ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+               if (!ret)
+                       ret = gb_audio_apbridgea_shutdown_rx(data->connection,
+                                                            0);
+               params->state = GBAUDIO_CODEC_STOP;
+       } else {
+               ret = -EINVAL;
+       }
+       if (ret)
+               dev_err_ratelimited(dai->dev,
+                                   "%s:Error during %s %s stream:%d\n",
+                                   module->name, mute ? "Mute" : "Unmute",
+                                   stream ? "Capture" : "Playback", ret);
+
+       gb_pm_runtime_put_noidle(bundle);
+       mutex_unlock(&codec->lock);
+       return ret;
+}
+
+static struct snd_soc_dai_ops gbcodec_dai_ops = {
+       .startup = gbcodec_startup,
+       .shutdown = gbcodec_shutdown,
+       .hw_params = gbcodec_hw_params,
+       .prepare = gbcodec_prepare,
+       .mute_stream = gbcodec_mute_stream,
+};
+
+static struct snd_soc_dai_driver gbaudio_dai[] = {
+       {
+               .name = "apb-i2s0",
+               .id = 0,
+               .playback = {
+                       .stream_name = "I2S 0 Playback",
+                       .rates = SNDRV_PCM_RATE_48000,
+                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                       .rate_max = 48000,
+                       .rate_min = 48000,
+                       .channels_min = 1,
+                       .channels_max = 2,
+               },
+               .capture = {
+                       .stream_name = "I2S 0 Capture",
+                       .rates = SNDRV_PCM_RATE_48000,
+                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                       .rate_max = 48000,
+                       .rate_min = 48000,
+                       .channels_min = 1,
+                       .channels_max = 2,
+               },
+               .ops = &gbcodec_dai_ops,
+       },
+};
+
+static int gbaudio_init_jack(struct gbaudio_module_info *module,
+                            struct snd_soc_codec *codec)
+{
+       int ret;
+
+       if (!module->jack_mask)
+               return 0;
+
+       snprintf(module->jack_name, NAME_SIZE, "GB %d Headset Jack",
+                module->dev_id);
+       ret = snd_soc_jack_new(codec, module->jack_name, module->jack_mask,
+                              &module->headset_jack);
+       if (ret) {
+               dev_err(module->dev, "Failed to create new jack\n");
+               return ret;
+       }
+
+       if (!module->button_mask)
+               return 0;
+
+       snprintf(module->button_name, NAME_SIZE, "GB %d Button Jack",
+                module->dev_id);
+       ret = snd_soc_jack_new(codec, module->button_name, module->button_mask,
+                              &module->button_jack);
+       if (ret) {
+               dev_err(module->dev, "Failed to create button jack\n");
+               return ret;
+       }
+
+       /*
+        * Currently, max 4 buttons are supported with following key mapping
+        * BTN_0 = KEY_MEDIA
+        * BTN_1 = KEY_VOICECOMMAND
+        * BTN_2 = KEY_VOLUMEUP
+        * BTN_3 = KEY_VOLUMEDOWN
+        */
+
+       if (module->button_mask & SND_JACK_BTN_0) {
+               ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_0,
+                                      KEY_MEDIA);
+               if (ret) {
+                       dev_err(module->dev, "Failed to set BTN_0\n");
+                       return ret;
+               }
+       }
+
+       if (module->button_mask & SND_JACK_BTN_1) {
+               ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_1,
+                                      KEY_VOICECOMMAND);
+               if (ret) {
+                       dev_err(module->dev, "Failed to set BTN_1\n");
+                       return ret;
+               }
+       }
+
+       if (module->button_mask & SND_JACK_BTN_2) {
+               ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_2,
+                                      KEY_VOLUMEUP);
+               if (ret) {
+                       dev_err(module->dev, "Failed to set BTN_2\n");
+                       return ret;
+               }
+       }
+
+       if (module->button_mask & SND_JACK_BTN_3) {
+               ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_3,
+                                      KEY_VOLUMEDOWN);
+               if (ret) {
+                       dev_err(module->dev, "Failed to set BTN_3\n");
+                       return ret;
+               }
+       }
+
+       /* FIXME
+        * verify if this is really required
+       set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
+               module->button_jack.jack->input_dev->propbit);
+       */
+
+       return 0;
+}
+
+int gbaudio_register_module(struct gbaudio_module_info *module)
+{
+       int ret;
+       struct snd_soc_codec *codec;
+       struct snd_card *card;
+       struct snd_soc_jack *jack = NULL;
+
+       if (!gbcodec) {
+               dev_err(module->dev, "GB Codec not yet probed\n");
+               return -EAGAIN;
+       }
+
+       codec = gbcodec->codec;
+       card = codec->card->snd_card;
+
+       down_write(&card->controls_rwsem);
+
+       if (module->num_dais) {
+               dev_err(gbcodec->dev,
+                       "%d:DAIs not supported via gbcodec driver\n",
+                       module->num_dais);
+               up_write(&card->controls_rwsem);
+               return -EINVAL;
+       }
+
+       ret = gbaudio_init_jack(module, codec);
+       if (ret) {
+               up_write(&card->controls_rwsem);
+               return ret;
+       }
+
+       if (module->dapm_widgets)
+               snd_soc_dapm_new_controls(&codec->dapm, module->dapm_widgets,
+                                         module->num_dapm_widgets);
+       if (module->controls)
+               snd_soc_add_codec_controls(codec, module->controls,
+                                    module->num_controls);
+       if (module->dapm_routes)
+               snd_soc_dapm_add_routes(&codec->dapm, module->dapm_routes,
+                                       module->num_dapm_routes);
+
+       /* card already instantiated, create widgets here only */
+       if (codec->card->instantiated) {
+               snd_soc_dapm_link_component_dai_widgets(codec->card,
+                                                       &codec->dapm);
+#ifdef CONFIG_SND_JACK
+               /* register jack devices for this module from codec->jack_list */
+               list_for_each_entry(jack, &codec->jack_list, list) {
+                       if ((jack == &module->headset_jack)
+                           || (jack == &module->button_jack))
+                               snd_device_register(codec->card->snd_card,
+                                                   jack->jack);
+               }
+#endif
+       }
+
+       mutex_lock(&gbcodec->lock);
+       list_add(&module->list, &gbcodec->module_list);
+       mutex_unlock(&gbcodec->lock);
+
+       if (codec->card->instantiated)
+               ret = snd_soc_dapm_new_widgets(&codec->dapm);
+       dev_dbg(codec->dev, "Registered %s module\n", module->name);
+
+       up_write(&card->controls_rwsem);
+       return ret;
+}
+EXPORT_SYMBOL(gbaudio_register_module);
+
+static void gbaudio_codec_clean_data_tx(struct gbaudio_data_connection *data)
+{
+       uint16_t i2s_port, cportid;
+       int ret;
+
+       if (list_is_singular(&gbcodec->module_list)) {
+               ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+               if (ret)
+                       return;
+               ret = gb_audio_apbridgea_shutdown_tx(data->connection,
+                                                    0);
+               if (ret)
+                       return;
+       }
+       i2s_port = 0;   /* fixed for now */
+       cportid = data->connection->hd_cport_id;
+       ret = gb_audio_apbridgea_unregister_cport(data->connection,
+                                                 i2s_port, cportid,
+                                                 AUDIO_APBRIDGEA_DIRECTION_TX);
+       data->state[0] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+static void gbaudio_codec_clean_data_rx(struct gbaudio_data_connection *data)
+{
+       uint16_t i2s_port, cportid;
+       int ret;
+
+       if (list_is_singular(&gbcodec->module_list)) {
+               ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+               if (ret)
+                       return;
+               ret = gb_audio_apbridgea_shutdown_rx(data->connection,
+                                                    0);
+               if (ret)
+                       return;
+       }
+       i2s_port = 0;   /* fixed for now */
+       cportid = data->connection->hd_cport_id;
+       ret = gb_audio_apbridgea_unregister_cport(data->connection,
+                                                 i2s_port, cportid,
+                                                 AUDIO_APBRIDGEA_DIRECTION_RX);
+       data->state[1] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+
+static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
+{
+       struct gbaudio_data_connection *data;
+       int pb_state, cap_state;
+
+       dev_dbg(gbcodec->dev, "%s: removed, cleanup APBridge\n", module->name);
+       list_for_each_entry(data, &module->data_list, list) {
+               pb_state = data->state[0];
+               cap_state = data->state[1];
+
+               if (pb_state > GBAUDIO_CODEC_SHUTDOWN)
+                       gbaudio_codec_clean_data_tx(data);
+
+               if (cap_state > GBAUDIO_CODEC_SHUTDOWN)
+                       gbaudio_codec_clean_data_rx(data);
+
+       }
+}
+
+void gbaudio_unregister_module(struct gbaudio_module_info *module)
+{
+       struct snd_soc_codec *codec = gbcodec->codec;
+       struct snd_card *card = codec->card->snd_card;
+       struct snd_soc_jack *jack, *next_j;
+       int mask;
+
+       dev_dbg(codec->dev, "Unregister %s module\n", module->name);
+
+       down_write(&card->controls_rwsem);
+       mutex_lock(&gbcodec->lock);
+       gbaudio_codec_cleanup(module);
+       list_del(&module->list);
+       dev_dbg(codec->dev, "Process Unregister %s module\n", module->name);
+       mutex_unlock(&gbcodec->lock);
+
+#ifdef CONFIG_SND_JACK
+       /* free jack devices for this module from codec->jack_list */
+       list_for_each_entry_safe(jack, next_j, &codec->jack_list, list) {
+               if (jack == &module->headset_jack)
+                       mask = GBCODEC_JACK_MASK;
+               else if (jack == &module->button_jack)
+                       mask = GBCODEC_JACK_BUTTON_MASK;
+               else
+                       mask = 0;
+               if (mask) {
+                       dev_dbg(module->dev, "Report %s removal\n",
+                               jack->jack->id);
+                       snd_soc_jack_report(jack, 0, mask);
+                       snd_device_free(codec->card->snd_card, jack->jack);
+                       list_del(&jack->list);
+               }
+       }
+#endif
+
+       if (module->dapm_routes) {
+               dev_dbg(codec->dev, "Removing %d routes\n",
+                       module->num_dapm_routes);
+               snd_soc_dapm_del_routes(&codec->dapm, module->dapm_routes,
+                                       module->num_dapm_routes);
+       }
+       if (module->controls) {
+               dev_dbg(codec->dev, "Removing %d controls\n",
+                       module->num_controls);
+               snd_soc_remove_codec_controls(codec, module->controls,
+                                         module->num_controls);
+       }
+       if (module->dapm_widgets) {
+               dev_dbg(codec->dev, "Removing %d widgets\n",
+                       module->num_dapm_widgets);
+               snd_soc_dapm_free_controls(&codec->dapm, module->dapm_widgets,
+                                          module->num_dapm_widgets);
+       }
+
+       dev_dbg(codec->dev, "Unregistered %s module\n", module->name);
+
+       up_write(&card->controls_rwsem);
+}
+EXPORT_SYMBOL(gbaudio_unregister_module);
+
+/*
+ * codec driver ops
+ */
+static int gbcodec_probe(struct snd_soc_codec *codec)
+{
+       int i;
+       struct gbaudio_codec_info *info;
+       struct gbaudio_codec_dai *dai;
+
+       info = devm_kzalloc(codec->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->dev = codec->dev;
+       INIT_LIST_HEAD(&info->module_list);
+       mutex_init(&info->lock);
+       INIT_LIST_HEAD(&info->dai_list);
+
+       /* init dai_list used to maintain runtime stream info */
+       for (i = 0; i < ARRAY_SIZE(gbaudio_dai); i++) {
+               dai = devm_kzalloc(codec->dev, sizeof(*dai), GFP_KERNEL);
+               if (!dai)
+                       return -ENOMEM;
+               dai->id = gbaudio_dai[i].id;
+               list_add(&dai->list, &info->dai_list);
+       }
+
+       info->codec = codec;
+       snd_soc_codec_set_drvdata(codec, info);
+       gbcodec = info;
+
+       device_init_wakeup(codec->dev, 1);
+       return 0;
+}
+
+static int gbcodec_remove(struct snd_soc_codec *codec)
+{
+       /* Empty function for now */
+       return 0;
+}
+
+static u8 gbcodec_reg[GBCODEC_REG_COUNT] = {
+       [GBCODEC_CTL_REG] = GBCODEC_CTL_REG_DEFAULT,
+       [GBCODEC_MUTE_REG] = GBCODEC_MUTE_REG_DEFAULT,
+       [GBCODEC_PB_LVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
+       [GBCODEC_PB_RVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
+       [GBCODEC_CAP_LVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
+       [GBCODEC_CAP_RVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
+       [GBCODEC_APB1_MUX_REG] = GBCODEC_APB1_MUX_REG_DEFAULT,
+       [GBCODEC_APB2_MUX_REG] = GBCODEC_APB2_MUX_REG_DEFAULT,
+};
+
+static int gbcodec_write(struct snd_soc_codec *codec, unsigned int reg,
+                        unsigned int value)
+{
+       int ret = 0;
+
+       if (reg == SND_SOC_NOPM)
+               return 0;
+
+       BUG_ON(reg >= GBCODEC_REG_COUNT);
+
+       gbcodec_reg[reg] = value;
+       dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, value);
+
+       return ret;
+}
+
+static unsigned int gbcodec_read(struct snd_soc_codec *codec,
+                                unsigned int reg)
+{
+       unsigned int val = 0;
+
+       if (reg == SND_SOC_NOPM)
+               return 0;
+
+       BUG_ON(reg >= GBCODEC_REG_COUNT);
+
+       val = gbcodec_reg[reg];
+       dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, val);
+
+       return val;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_gbaudio = {
+       .probe  = gbcodec_probe,
+       .remove = gbcodec_remove,
+
+       .read = gbcodec_read,
+       .write = gbcodec_write,
+
+       .reg_cache_size = GBCODEC_REG_COUNT,
+       .reg_cache_default = gbcodec_reg_defaults,
+       .reg_word_size = 1,
+
+       .idle_bias_off = true,
+       .ignore_pmdown_time = 1,
+};
+
+#ifdef CONFIG_PM
+static int gbaudio_codec_suspend(struct device *dev)
+{
+       dev_dbg(dev, "%s: suspend\n", __func__);
+       return 0;
+}
+
+static int gbaudio_codec_resume(struct device *dev)
+{
+       dev_dbg(dev, "%s: resume\n", __func__);
+       return 0;
+}
+
+static const struct dev_pm_ops gbaudio_codec_pm_ops = {
+       .suspend        = gbaudio_codec_suspend,
+       .resume         = gbaudio_codec_resume,
+};
+#endif
+
+static int gbaudio_codec_probe(struct platform_device *pdev)
+{
+       return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_gbaudio,
+                       gbaudio_dai, ARRAY_SIZE(gbaudio_dai));
+}
+
+static int gbaudio_codec_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_codec(&pdev->dev);
+       return 0;
+}
+
+static const struct of_device_id greybus_asoc_machine_of_match[] = {
+       { .compatible = "toshiba,apb-dummy-codec", },
+       {},
+};
+
+static struct platform_driver gbaudio_codec_driver = {
+       .driver = {
+               .name = "apb-dummy-codec",
+               .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+               .pm = &gbaudio_codec_pm_ops,
+#endif
+               .of_match_table = greybus_asoc_machine_of_match,
+       },
+       .probe = gbaudio_codec_probe,
+       .remove = gbaudio_codec_remove,
+};
+module_platform_driver(gbaudio_codec_driver);
+
+MODULE_DESCRIPTION("APBridge ALSA SoC dummy codec driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:apb-dummy-codec");
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
new file mode 100644 (file)
index 0000000..0a86459
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __LINUX_GBAUDIO_CODEC_H
+#define __LINUX_GBAUDIO_CODEC_H
+
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+#define NAME_SIZE      32
+#define MAX_DAIS       2       /* APB1, APB2 */
+
+enum {
+       APB1_PCM = 0,
+       APB2_PCM,
+       NUM_CODEC_DAIS,
+};
+
+enum gbcodec_reg_index {
+       GBCODEC_CTL_REG,
+       GBCODEC_MUTE_REG,
+       GBCODEC_PB_LVOL_REG,
+       GBCODEC_PB_RVOL_REG,
+       GBCODEC_CAP_LVOL_REG,
+       GBCODEC_CAP_RVOL_REG,
+       GBCODEC_APB1_MUX_REG,
+       GBCODEC_APB2_MUX_REG,
+       GBCODEC_REG_COUNT
+};
+
+/* device_type should be same as defined in audio.h (Android media layer) */
+enum {
+       GBAUDIO_DEVICE_NONE                     = 0x0,
+       /* reserved bits */
+       GBAUDIO_DEVICE_BIT_IN                   = 0x80000000,
+       GBAUDIO_DEVICE_BIT_DEFAULT              = 0x40000000,
+       /* output devices */
+       GBAUDIO_DEVICE_OUT_SPEAKER              = 0x2,
+       GBAUDIO_DEVICE_OUT_WIRED_HEADSET        = 0x4,
+       GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE      = 0x8,
+       /* input devices */
+       GBAUDIO_DEVICE_IN_BUILTIN_MIC           = GBAUDIO_DEVICE_BIT_IN | 0x4,
+       GBAUDIO_DEVICE_IN_WIRED_HEADSET         = GBAUDIO_DEVICE_BIT_IN | 0x10,
+};
+
+/* bit 0-SPK, 1-HP, 2-DAC,
+ * 4-MIC, 5-HSMIC, 6-MIC2
+ */
+#define GBCODEC_CTL_REG_DEFAULT                0x00
+
+/* bit 0,1 - APB1-PB-L/R
+ * bit 2,3 - APB2-PB-L/R
+ * bit 4,5 - APB1-Cap-L/R
+ * bit 6,7 - APB2-Cap-L/R
+ */
+#define        GBCODEC_MUTE_REG_DEFAULT        0x00
+
+/* 0-127 steps */
+#define        GBCODEC_PB_VOL_REG_DEFAULT      0x00
+#define        GBCODEC_CAP_VOL_REG_DEFAULT     0x00
+
+/* bit 0,1,2 - PB stereo, left, right
+ * bit 8,9,10 - Cap stereo, left, right
+ */
+#define GBCODEC_APB1_MUX_REG_DEFAULT   0x00
+#define GBCODEC_APB2_MUX_REG_DEFAULT   0x00
+
+#define GBCODEC_JACK_MASK              0x0000FFFF
+#define GBCODEC_JACK_BUTTON_MASK       0xFFFF0000
+
+static const u8 gbcodec_reg_defaults[GBCODEC_REG_COUNT] = {
+       GBCODEC_CTL_REG_DEFAULT,
+       GBCODEC_MUTE_REG_DEFAULT,
+       GBCODEC_PB_VOL_REG_DEFAULT,
+       GBCODEC_PB_VOL_REG_DEFAULT,
+       GBCODEC_CAP_VOL_REG_DEFAULT,
+       GBCODEC_CAP_VOL_REG_DEFAULT,
+       GBCODEC_APB1_MUX_REG_DEFAULT,
+       GBCODEC_APB2_MUX_REG_DEFAULT,
+};
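For illustration only (not part of this patch), a mute register value can be composed from the bit layout documented above; the particular bits chosen here are hypothetical.

	/* mute APB1 playback L/R (bits 0,1) and APB1 capture L/R (bits 4,5) */
	u8 mute = BIT(0) | BIT(1) | BIT(4) | BIT(5);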
+
+enum gbaudio_codec_state {
+       GBAUDIO_CODEC_SHUTDOWN = 0,
+       GBAUDIO_CODEC_STARTUP,
+       GBAUDIO_CODEC_HWPARAMS,
+       GBAUDIO_CODEC_PREPARE,
+       GBAUDIO_CODEC_START,
+       GBAUDIO_CODEC_STOP,
+};
+
+struct gbaudio_stream_params {
+       int state;
+       uint8_t sig_bits, channels;
+       uint32_t format, rate;
+};
+
+struct gbaudio_codec_dai {
+       int id;
+       /* runtime params for playback/capture streams */
+       struct gbaudio_stream_params params[2];
+       struct list_head list;
+};
+
+struct gbaudio_codec_info {
+       struct device *dev;
+       struct snd_soc_codec *codec;
+       struct list_head module_list;
+       /* to maintain runtime stream params for each DAI */
+       struct list_head dai_list;
+       struct mutex lock;
+       u8 reg[GBCODEC_REG_COUNT];
+};
+
+struct gbaudio_widget {
+       __u8 id;
+       const char *name;
+       struct list_head list;
+};
+
+struct gbaudio_control {
+       __u8 id;
+       char *name;
+       char *wname;
+       const char * const *texts;
+       int items;
+       struct list_head list;
+};
+
+struct gbaudio_data_connection {
+       int id;
+       __le16 data_cport;
+       struct gb_connection *connection;
+       struct list_head list;
+       /* maintain runtime state for playback/capture stream */
+       int state[2];
+};
+
+/* stream direction */
+#define GB_PLAYBACK    BIT(0)
+#define GB_CAPTURE     BIT(1)
+
+enum gbaudio_module_state {
+       GBAUDIO_MODULE_OFF = 0,
+       GBAUDIO_MODULE_ON,
+};
+
+struct gbaudio_module_info {
+       /* module info */
+       struct device *dev;
+       int dev_id;     /* check if it should be bundle_id/hd_cport_id */
+       int vid;
+       int pid;
+       int slot;
+       int type;
+       int set_uevent;
+       char vstr[NAME_SIZE];
+       char pstr[NAME_SIZE];
+       struct list_head list;
+       /* need to share this info to above user space */
+       int manager_id;
+       char name[NAME_SIZE];
+       unsigned int ip_devices;
+       unsigned int op_devices;
+
+       /* jack related */
+       char jack_name[NAME_SIZE];
+       char button_name[NAME_SIZE];
+       int jack_type;
+       int jack_mask;
+       int button_mask;
+       int button_status;
+       struct snd_soc_jack headset_jack;
+       struct snd_soc_jack button_jack;
+
+       /* connection info */
+       struct gb_connection *mgmt_connection;
+       size_t num_data_connections;
+       struct list_head data_list;
+
+       /* topology related */
+       int num_dais;
+       int num_controls;
+       int num_dapm_widgets;
+       int num_dapm_routes;
+       unsigned long dai_offset;
+       unsigned long widget_offset;
+       unsigned long control_offset;
+       unsigned long route_offset;
+       struct snd_kcontrol_new *controls;
+       struct snd_soc_dapm_widget *dapm_widgets;
+       struct snd_soc_dapm_route *dapm_routes;
+       struct snd_soc_dai_driver *dais;
+
+       struct list_head widget_list;
+       struct list_head ctl_list;
+       struct list_head widget_ctl_list;
+
+       struct gb_audio_topology *topology;
+};
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+                              struct gb_audio_topology *tplg_data);
+void gbaudio_tplg_release(struct gbaudio_module_info *module);
+
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+                         struct snd_soc_dapm_widget *w,
+                         struct gbaudio_module_info *module,
+                         int enable);
+int gbaudio_register_module(struct gbaudio_module_info *module);
+void gbaudio_unregister_module(struct gbaudio_module_info *module);
+
+/* protocol related */
+extern int gb_audio_gb_get_topology(struct gb_connection *connection,
+                                   struct gb_audio_topology **topology);
+extern int gb_audio_gb_get_control(struct gb_connection *connection,
+                                  uint8_t control_id, uint8_t index,
+                                  struct gb_audio_ctl_elem_value *value);
+extern int gb_audio_gb_set_control(struct gb_connection *connection,
+                                  uint8_t control_id, uint8_t index,
+                                  struct gb_audio_ctl_elem_value *value);
+extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
+                                    uint8_t widget_id);
+extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
+                                     uint8_t widget_id);
+extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
+                              uint16_t data_cport, uint32_t *format,
+                              uint32_t *rate, uint8_t *channels,
+                              uint8_t *sig_bits);
+extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
+                              uint16_t data_cport, uint32_t format,
+                              uint32_t rate, uint8_t channels,
+                              uint8_t sig_bits);
+extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+                                       uint16_t data_cport, uint16_t size);
+extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
+                                  uint16_t data_cport);
+extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+                                    uint16_t data_cport);
+extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+                                       uint16_t data_cport, uint16_t size);
+extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
+                                  uint16_t data_cport);
+extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+                                    uint16_t data_cport);
+extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+                                        __u16 i2s_port, __u32 format,
+                                        __u32 rate, __u32 mclk_freq);
+extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+                                            __u16 i2s_port, __u16 cportid,
+                                            __u8 direction);
+extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+                                              __u16 i2s_port, __u16 cportid,
+                                              __u8 direction);
+extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+                                              __u16 i2s_port, __u16 size);
+extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+                                        __u16 i2s_port);
+extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+                                      __u16 i2s_port, __u64 timestamp);
+extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
+                                     __u16 i2s_port);
+extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+                                         __u16 i2s_port);
+extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+                                              __u16 i2s_port, __u16 size);
+extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+                                        __u16 i2s_port);
+extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+                                      __u16 i2s_port);
+extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
+                                     __u16 i2s_port);
+extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+                                         __u16 i2s_port);
+
+#endif /* __LINUX_GBAUDIO_CODEC_H */
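For reference only (not part of this patch), a minimal sketch of the APBridgeA call order that the DAI ops in audio_codec.c drive when starting playback on I2S port 0; the 192-byte data size and 6.144 MHz MCLK mirror the values used there.

static int example_start_playback(struct gb_connection *conn)
{
	int ret;

	/* configure the I2S port: 16-bit PCM at 48 kHz */
	ret = gb_audio_apbridgea_set_config(conn, 0, AUDIO_APBRIDGEA_PCM_FMT_16,
					    AUDIO_APBRIDGEA_PCM_RATE_48000,
					    6144000);
	if (ret)
		return ret;

	ret = gb_audio_apbridgea_set_tx_data_size(conn, 0, 192);
	if (ret)
		return ret;

	ret = gb_audio_apbridgea_prepare_tx(conn, 0);
	if (ret)
		return ret;

	return gb_audio_apbridgea_start_tx(conn, 0, 0);
}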
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
new file mode 100644 (file)
index 0000000..a2f1c92
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+#include "operation.h"
+#include "audio_codec.h"
+
+/* TODO: Split into separate calls */
+int gb_audio_gb_get_topology(struct gb_connection *connection,
+                            struct gb_audio_topology **topology)
+{
+       struct gb_audio_get_topology_size_response size_resp;
+       struct gb_audio_topology *topo;
+       uint16_t size;
+       int ret;
+
+       ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
+                               NULL, 0, &size_resp, sizeof(size_resp));
+       if (ret)
+               return ret;
+
+       size = le16_to_cpu(size_resp.size);
+       if (size < sizeof(*topo))
+               return -ENODATA;
+
+       topo = kzalloc(size, GFP_KERNEL);
+       if (!topo)
+               return -ENOMEM;
+
+       ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
+                               topo, size);
+       if (ret) {
+               kfree(topo);
+               return ret;
+       }
+
+       *topology = topo;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
+
+int gb_audio_gb_get_control(struct gb_connection *connection,
+                           uint8_t control_id, uint8_t index,
+                           struct gb_audio_ctl_elem_value *value)
+{
+       struct gb_audio_get_control_request req;
+       struct gb_audio_get_control_response resp;
+       int ret;
+
+       req.control_id = control_id;
+       req.index = index;
+
+       ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
+                               &req, sizeof(req), &resp, sizeof(resp));
+       if (ret)
+               return ret;
+
+       memcpy(value, &resp.value, sizeof(*value));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
+
+int gb_audio_gb_set_control(struct gb_connection *connection,
+                           uint8_t control_id, uint8_t index,
+                           struct gb_audio_ctl_elem_value *value)
+{
+       struct gb_audio_set_control_request req;
+
+       req.control_id = control_id;
+       req.index = index;
+       memcpy(&req.value, value, sizeof(req.value));
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
+
+int gb_audio_gb_enable_widget(struct gb_connection *connection,
+                             uint8_t widget_id)
+{
+       struct gb_audio_enable_widget_request req;
+
+       req.widget_id = widget_id;
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
+
+int gb_audio_gb_disable_widget(struct gb_connection *connection,
+                              uint8_t widget_id)
+{
+       struct gb_audio_disable_widget_request req;
+
+       req.widget_id = widget_id;
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
+
+int gb_audio_gb_get_pcm(struct gb_connection *connection, uint16_t data_cport,
+                       uint32_t *format, uint32_t *rate, uint8_t *channels,
+                       uint8_t *sig_bits)
+{
+       struct gb_audio_get_pcm_request req;
+       struct gb_audio_get_pcm_response resp;
+       int ret;
+
+       req.data_cport = cpu_to_le16(data_cport);
+
+       ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
+                               &req, sizeof(req), &resp, sizeof(resp));
+       if (ret)
+               return ret;
+
+       *format = le32_to_cpu(resp.format);
+       *rate = le32_to_cpu(resp.rate);
+       *channels = resp.channels;
+       *sig_bits = resp.sig_bits;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
+
+int gb_audio_gb_set_pcm(struct gb_connection *connection, uint16_t data_cport,
+                       uint32_t format, uint32_t rate, uint8_t channels,
+                       uint8_t sig_bits)
+{
+       struct gb_audio_set_pcm_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+       req.format = cpu_to_le32(format);
+       req.rate = cpu_to_le32(rate);
+       req.channels = channels;
+       req.sig_bits = sig_bits;
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
+
+int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+                                uint16_t data_cport, uint16_t size)
+{
+       struct gb_audio_set_tx_data_size_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+       req.size = cpu_to_le16(size);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
+
+int gb_audio_gb_activate_tx(struct gb_connection *connection,
+                           uint16_t data_cport)
+{
+       struct gb_audio_activate_tx_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
+
+int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+                             uint16_t data_cport)
+{
+       struct gb_audio_deactivate_tx_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
+
+int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+                                uint16_t data_cport, uint16_t size)
+{
+       struct gb_audio_set_rx_data_size_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+       req.size = cpu_to_le16(size);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
+
+int gb_audio_gb_activate_rx(struct gb_connection *connection,
+                           uint16_t data_cport)
+{
+       struct gb_audio_activate_rx_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
+
+int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+                             uint16_t data_cport)
+{
+       struct gb_audio_deactivate_rx_request req;
+
+       req.data_cport = cpu_to_le16(data_cport);
+
+       return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
+                                &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-gb");
+MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
new file mode 100644 (file)
index 0000000..aa6508b
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rwlock.h>
+#include <linux/idr.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+static struct kset *manager_kset;
+
+static LIST_HEAD(modules_list);
+static DECLARE_RWSEM(modules_rwsem);
+static DEFINE_IDA(module_id);
+
+/* helpers */
+static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
+{
+       struct gb_audio_manager_module *module;
+
+       if (id < 0)
+               return NULL;
+
+       list_for_each_entry(module, &modules_list, list) {
+               if (module->id == id)
+                       return module;
+       }
+
+       return NULL;
+}
+
+/* public API */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
+{
+       struct gb_audio_manager_module *module;
+       int id;
+       int err;
+
+       id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       err = gb_audio_manager_module_create(&module, manager_kset,
+                                            id, desc);
+       if (err) {
+               ida_simple_remove(&module_id, id);
+               return err;
+       }
+
+       /* Add it to the list */
+       down_write(&modules_rwsem);
+       list_add_tail(&module->list, &modules_list);
+       up_write(&modules_rwsem);
+
+       return module->id;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_add);
+
+int gb_audio_manager_remove(int id)
+{
+       struct gb_audio_manager_module *module;
+
+       down_write(&modules_rwsem);
+
+       module = gb_audio_manager_get_locked(id);
+       if (!module) {
+               up_write(&modules_rwsem);
+               return -EINVAL;
+       }
+       list_del(&module->list);
+       kobject_put(&module->kobj);
+       up_write(&modules_rwsem);
+       ida_simple_remove(&module_id, id);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
+
+void gb_audio_manager_remove_all(void)
+{
+       struct gb_audio_manager_module *module, *next;
+       int is_empty = 1;
+
+       down_write(&modules_rwsem);
+
+       list_for_each_entry_safe(module, next, &modules_list, list) {
+               list_del(&module->list);
+               /* release the id before the final put can free the module */
+               ida_simple_remove(&module_id, module->id);
+               kobject_put(&module->kobj);
+       }
+
+       is_empty = list_empty(&modules_list);
+
+       up_write(&modules_rwsem);
+
+       if (!is_empty)
+               pr_warn("Not all nodes were deleted\n");
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
+
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
+{
+       struct gb_audio_manager_module *module;
+
+       down_read(&modules_rwsem);
+       module = gb_audio_manager_get_locked(id);
+       if (module)
+               kobject_get(&module->kobj);
+       up_read(&modules_rwsem);
+       return module;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
+
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
+{
+       kobject_put(&module->kobj);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
+
+int gb_audio_manager_dump_module(int id)
+{
+       struct gb_audio_manager_module *module;
+
+       down_read(&modules_rwsem);
+       module = gb_audio_manager_get_locked(id);
+       up_read(&modules_rwsem);
+
+       if (!module)
+               return -EINVAL;
+
+       gb_audio_manager_module_dump(module);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
+
+void gb_audio_manager_dump_all(void)
+{
+       struct gb_audio_manager_module *module;
+       int count = 0;
+
+       down_read(&modules_rwsem);
+       list_for_each_entry(module, &modules_list, list) {
+               gb_audio_manager_module_dump(module);
+               count++;
+       }
+       up_read(&modules_rwsem);
+
+       pr_info("Number of connected modules: %d\n", count);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
+
+/*
+ * module init/deinit
+ */
+static int __init manager_init(void)
+{
+       manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
+                                          kernel_kobj);
+       if (!manager_kset)
+               return -ENOMEM;
+
+#ifdef GB_AUDIO_MANAGER_SYSFS
+       gb_audio_manager_sysfs_init(&manager_kset->kobj);
+#endif
+
+       return 0;
+}
+
+static void __exit manager_exit(void)
+{
+       gb_audio_manager_remove_all();
+       kset_unregister(manager_kset);
+       ida_destroy(&module_id);
+}
+
+module_init(manager_init);
+module_exit(manager_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
new file mode 100644 (file)
index 0000000..c4ca097
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_H_
+#define _GB_AUDIO_MANAGER_H_
+
+#include <linux/kobject.h>
+#include <linux/list.h>
+
+#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"
+
+struct gb_audio_manager_module_descriptor {
+       char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
+       int slot;
+       int vid;
+       int pid;
+       int cport;
+       unsigned int ip_devices;
+       unsigned int op_devices;
+};
+
+struct gb_audio_manager_module {
+       struct kobject kobj;
+       struct list_head list;
+       int id;
+       struct gb_audio_manager_module_descriptor desc;
+};
+
+/*
+ * Creates a new gb_audio_manager_module_descriptor, using the specified
+ * descriptor.
+ *
+ * Returns a negative result on error, or the id of the newly created module.
+ *
+ */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);
+
+/*
+ * Removes a connected gb_audio_manager_module_descriptor for the specified ID.
+ *
+ * Returns zero on success, or a negative value on error.
+ */
+int gb_audio_manager_remove(int id);
+
+/*
+ * Removes all connected gb_audio_modules
+ *
+ * Returns zero on success, or a negative value on error.
+ */
+void gb_audio_manager_remove_all(void);
+
+/*
+ * Retrieves a gb_audio_manager_module_descriptor for the specified id.
+ * Returns the gb_audio_manager_module_descriptor structure,
+ * or NULL if there is no module with the specified ID.
+ */
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id);
+
+/*
+ * Decreases the refcount of the module, obtained by the get function.
+ * Modules are removed via gb_audio_manager_remove
+ */
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module);
+
+/*
+ * Dumps the module for the specified id
+ * Return 0 on success
+ */
+int gb_audio_manager_dump_module(int id);
+
+/*
+ * Dumps all connected modules
+ */
+void gb_audio_manager_dump_all(void);
+
+#endif /* _GB_AUDIO_MANAGER_H_ */
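As a usage illustration only (not part of this patch), a sketch of the manager API lifecycle; every descriptor value below is hypothetical.

static int example_manager_usage(void)
{
	struct gb_audio_manager_module_descriptor desc = {
		.name = "example-module",	/* hypothetical */
		.slot = 1,
		.vid = 0x1234,
		.pid = 0x5678,
		.cport = 3,
		.ip_devices = 0x80000004,	/* e.g. built-in mic */
		.op_devices = 0x2,		/* e.g. speaker */
	};
	int id;

	id = gb_audio_manager_add(&desc);
	if (id < 0)
		return id;

	gb_audio_manager_dump_module(id);

	return gb_audio_manager_remove(id);
}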
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
new file mode 100644 (file)
index 0000000..a10e96a
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/slab.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+#define to_gb_audio_module_attr(x)     \
+               container_of(x, struct gb_audio_manager_module_attribute, attr)
+#define to_gb_audio_module(x)          \
+               container_of(x, struct gb_audio_manager_module, kobj)
+
+struct gb_audio_manager_module_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct gb_audio_manager_module *module,
+                       struct gb_audio_manager_module_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct gb_audio_manager_module *module,
+                        struct gb_audio_manager_module_attribute *attr,
+                        const char *buf, size_t count);
+};
+
+static ssize_t gb_audio_module_attr_show(
+       struct kobject *kobj, struct attribute *attr, char *buf)
+{
+       struct gb_audio_manager_module_attribute *attribute;
+       struct gb_audio_manager_module *module;
+
+       attribute = to_gb_audio_module_attr(attr);
+       module = to_gb_audio_module(kobj);
+
+       if (!attribute->show)
+               return -EIO;
+
+       return attribute->show(module, attribute, buf);
+}
+
+static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf, size_t len)
+{
+       struct gb_audio_manager_module_attribute *attribute;
+       struct gb_audio_manager_module *module;
+
+       attribute = to_gb_audio_module_attr(attr);
+       module = to_gb_audio_module(kobj);
+
+       if (!attribute->store)
+               return -EIO;
+
+       return attribute->store(module, attribute, buf, len);
+}
+
+static const struct sysfs_ops gb_audio_module_sysfs_ops = {
+       .show = gb_audio_module_attr_show,
+       .store = gb_audio_module_attr_store,
+};
+
+static void gb_audio_module_release(struct kobject *kobj)
+{
+       struct gb_audio_manager_module *module = to_gb_audio_module(kobj);
+
+       pr_info("Destroying audio module #%d\n", module->id);
+       /* TODO -> delete from list */
+       kfree(module);
+}
+
+static ssize_t gb_audio_module_name_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s", module->desc.name);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
+       __ATTR(name, 0664, gb_audio_module_name_show, NULL);
+
+static ssize_t gb_audio_module_slot_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d", module->desc.slot);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
+       __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
+
+static ssize_t gb_audio_module_vid_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d", module->desc.vid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
+       __ATTR(vid, 0664, gb_audio_module_vid_show, NULL);
+
+static ssize_t gb_audio_module_pid_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d", module->desc.pid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
+       __ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
+
+static ssize_t gb_audio_module_cport_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d", module->desc.cport);
+}
+
+static struct gb_audio_manager_module_attribute
+                                       gb_audio_module_cport_attribute =
+       __ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
+
+static ssize_t gb_audio_module_ip_devices_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "0x%X", module->desc.ip_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+                                       gb_audio_module_ip_devices_attribute =
+       __ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);
+
+static ssize_t gb_audio_module_op_devices_show(
+       struct gb_audio_manager_module *module,
+       struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+       return sprintf(buf, "0x%X", module->desc.op_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+                                       gb_audio_module_op_devices_attribute =
+       __ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
+
+static struct attribute *gb_audio_module_default_attrs[] = {
+       &gb_audio_module_name_attribute.attr,
+       &gb_audio_module_slot_attribute.attr,
+       &gb_audio_module_vid_attribute.attr,
+       &gb_audio_module_pid_attribute.attr,
+       &gb_audio_module_cport_attribute.attr,
+       &gb_audio_module_ip_devices_attribute.attr,
+       &gb_audio_module_op_devices_attribute.attr,
+       NULL,   /* need to NULL terminate the list of attributes */
+};
+
+static struct kobj_type gb_audio_module_type = {
+       .sysfs_ops = &gb_audio_module_sysfs_ops,
+       .release = gb_audio_module_release,
+       .default_attrs = gb_audio_module_default_attrs,
+};
+
+static void send_add_uevent(struct gb_audio_manager_module *module)
+{
+       char name_string[128];
+       char slot_string[64];
+       char vid_string[64];
+       char pid_string[64];
+       char cport_string[64];
+       char ip_devices_string[64];
+       char op_devices_string[64];
+
+       char *envp[] = {
+               name_string,
+               slot_string,
+               vid_string,
+               pid_string,
+               cport_string,
+               ip_devices_string,
+               op_devices_string,
+               NULL
+       };
+
+       snprintf(name_string, 128, "NAME=%s", module->desc.name);
+       snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
+       snprintf(vid_string, 64, "VID=%d", module->desc.vid);
+       snprintf(pid_string, 64, "PID=%d", module->desc.pid);
+       snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
+       snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
+                module->desc.ip_devices);
+       snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
+                module->desc.op_devices);
+
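+       /*
+        * For illustration, the resulting KOBJ_ADD uevent carries the module
+        * descriptor as environment variables, e.g. (arbitrary values):
+        * NAME=mymodule SLOT=1 VID=2 PID=3 CPORT=4
+        * I/P DEVICES=0x3 O/P DEVICES=0x3
+        */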
+       kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
+}
+
+int gb_audio_manager_module_create(
+       struct gb_audio_manager_module **module,
+       struct kset *manager_kset,
+       int id, struct gb_audio_manager_module_descriptor *desc)
+{
+       int err;
+       struct gb_audio_manager_module *m;
+
+       m = kzalloc(sizeof(*m), GFP_ATOMIC);
+       if (!m)
+               return -ENOMEM;
+
+       /* Initialize the node */
+       INIT_LIST_HEAD(&m->list);
+
+       /* Set the module id */
+       m->id = id;
+
+       /* Copy the provided descriptor */
+       memcpy(&m->desc, desc, sizeof(*desc));
+
+       /* set the kset */
+       m->kobj.kset = manager_kset;
+
+       /*
+        * Initialize and add the kobject to the kernel.  All the default files
+        * will be created here.  As we have already specified a kset for this
+        * kobject, we don't have to set a parent for the kobject; it will be
+        * placed beneath that kset automatically.
+        */
+       err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
+                                  id);
+       if (err) {
+               pr_err("failed initializing kobject for audio module #%d\n",
+                      id);
+               kobject_put(&m->kobj);
+               return err;
+       }
+
+       /*
+        * Notify the object was created
+        */
+       send_add_uevent(m);
+
+       *module = m;
+       pr_info("Created audio module #%d\n", id);
+       return 0;
+}
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
+{
+       pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
+               module->id,
+               module->desc.name,
+               module->desc.slot,
+               module->desc.vid,
+               module->desc.pid,
+               module->desc.cport,
+               module->desc.ip_devices,
+               module->desc.op_devices);
+}
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
new file mode 100644 (file)
index 0000000..079ce95
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
+#define _GB_AUDIO_MANAGER_PRIVATE_H_
+
+#include <linux/kobject.h>
+
+#include "audio_manager.h"
+
+int gb_audio_manager_module_create(
+       struct gb_audio_manager_module **module,
+       struct kset *manager_kset,
+       int id, struct gb_audio_manager_module_descriptor *desc);
+
+/* module destroyed via kobject_put */
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);
+
+/* sysfs control */
+void gb_audio_manager_sysfs_init(struct kobject *kobj);
+
+#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
new file mode 100644 (file)
index 0000000..d8bf859
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+static ssize_t manager_sysfs_add_store(
+       struct kobject *kobj, struct kobj_attribute *attr,
+       const char *buf, size_t count)
+{
+       struct gb_audio_manager_module_descriptor desc = { {0} };
+
+       int num = sscanf(buf,
+                       "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
+                       "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X "
+                       "o/p devices=0x%X",
+                       desc.name, &desc.slot, &desc.vid, &desc.pid,
+                       &desc.cport, &desc.ip_devices, &desc.op_devices);
+
+       if (num != 7)
+               return -EINVAL;
+
+       num = gb_audio_manager_add(&desc);
+       if (num < 0)
+               return -EINVAL;
+
+       return count;
+}
+
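+/*
+ * For illustration only, a write matching the format parsed above could look
+ * like the following single line (all values are arbitrary):
+ *
+ * name=gbaudiomodule0 slot=1 vid=2 pid=3 cport=4 i/p devices=0x3 o/p devices=0x3
+ */
+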
+static struct kobj_attribute manager_add_attribute =
+       __ATTR(add, 0664, NULL, manager_sysfs_add_store);
+
+static ssize_t manager_sysfs_remove_store(
+       struct kobject *kobj, struct kobj_attribute *attr,
+       const char *buf, size_t count)
+{
+       int id;
+
+       int num = sscanf(buf, "%d", &id);
+
+       if (num != 1)
+               return -EINVAL;
+
+       num = gb_audio_manager_remove(id);
+       if (num)
+               return num;
+
+       return count;
+}
+
+static struct kobj_attribute manager_remove_attribute =
+       __ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
+
+static ssize_t manager_sysfs_dump_store(
+       struct kobject *kobj, struct kobj_attribute *attr,
+       const char *buf, size_t count)
+{
+       int id;
+
+       int num = sscanf(buf, "%d", &id);
+
+       if (num == 1) {
+               num = gb_audio_manager_dump_module(id);
+               if (num)
+                       return num;
+       } else if (!strncmp("all", buf, 3))
+               gb_audio_manager_dump_all();
+       else
+               return -EINVAL;
+
+       return count;
+}
+
+static struct kobj_attribute manager_dump_attribute =
+       __ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
+
+static void manager_sysfs_init_attribute(
+               struct kobject *kobj, struct kobj_attribute *kattr)
+{
+       int err;
+
+       err = sysfs_create_file(kobj, &kattr->attr);
+       if (err) {
+               pr_warn("creating the sysfs entry for %s failed: %d\n",
+                       kattr->attr.name, err);
+       }
+}
+
+void gb_audio_manager_sysfs_init(struct kobject *kobj)
+{
+       manager_sysfs_init_attribute(kobj, &manager_add_attribute);
+       manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
+       manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
+}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
new file mode 100644 (file)
index 0000000..411735d
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+
+/*
+ * gb_snd management functions
+ */
+
+static int gbaudio_request_jack(struct gbaudio_module_info *module,
+                                 struct gb_audio_jack_event_request *req)
+{
+       int report;
+       struct snd_jack *jack = module->headset_jack.jack;
+       struct snd_jack *btn_jack = module->button_jack.jack;
+
+       if (!jack) {
+               dev_err_ratelimited(module->dev,
+                       "Invalid jack event received:type: %u, event: %u\n",
+                       req->jack_attribute, req->event);
+               return -EINVAL;
+       }
+
+       dev_warn_ratelimited(module->dev,
+                            "Jack Event received: type: %u, event: %u\n",
+                            req->jack_attribute, req->event);
+
+       if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
+               module->jack_type = 0;
+               if (btn_jack && module->button_status) {
+                       snd_soc_jack_report(&module->button_jack, 0,
+                                           module->button_mask);
+                       module->button_status = 0;
+               }
+               snd_soc_jack_report(&module->headset_jack, 0,
+                                   module->jack_mask);
+               return 0;
+       }
+
+       report = req->jack_attribute & module->jack_mask;
+       if (!report) {
+               dev_err_ratelimited(module->dev,
+                       "Invalid jack event received:type: %u, event: %u\n",
+                       req->jack_attribute, req->event);
+               return -EINVAL;
+       }
+
+       if (module->jack_type)
+               dev_warn_ratelimited(module->dev,
+                                    "Modifying jack from %d to %d\n",
+                                    module->jack_type, report);
+
+       module->jack_type = report;
+       snd_soc_jack_report(&module->headset_jack, report, module->jack_mask);
+
+       return 0;
+}
+
+static int gbaudio_request_button(struct gbaudio_module_info *module,
+                                 struct gb_audio_button_event_request *req)
+{
+       int soc_button_id, report;
+       struct snd_jack *btn_jack = module->button_jack.jack;
+
+       if (!btn_jack) {
+               dev_err_ratelimited(module->dev,
+                       "Invalid button event received:type: %u, event: %u\n",
+                       req->button_id, req->event);
+               return -EINVAL;
+       }
+
+       dev_warn_ratelimited(module->dev,
+                            "Button Event received: id: %u, event: %u\n",
+                            req->button_id, req->event);
+
+       /* currently supports 4 buttons only */
+       if (!module->jack_type) {
+               dev_err_ratelimited(module->dev,
+                                   "Jack not present. Bogus event!!\n");
+               return -EINVAL;
+       }
+
+       report = module->button_status & module->button_mask;
+       soc_button_id = 0;
+
+       switch (req->button_id) {
+       case 1:
+               soc_button_id = SND_JACK_BTN_0 & module->button_mask;
+               break;
+
+       case 2:
+               soc_button_id = SND_JACK_BTN_1 & module->button_mask;
+               break;
+
+       case 3:
+               soc_button_id = SND_JACK_BTN_2 & module->button_mask;
+               break;
+
+       case 4:
+               soc_button_id = SND_JACK_BTN_3 & module->button_mask;
+               break;
+       }
+
+       if (!soc_button_id) {
+               dev_err_ratelimited(module->dev,
+                                   "Invalid button request received\n");
+               return -EINVAL;
+       }
+
+       if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
+               report = report | soc_button_id;
+       else
+               report = report & ~soc_button_id;
+
+       module->button_status = report;
+
+       snd_soc_jack_report(&module->button_jack, report, module->button_mask);
+
+       return 0;
+}
+
+static int gbaudio_request_stream(struct gbaudio_module_info *module,
+                                 struct gb_audio_streaming_event_request *req)
+{
+       dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
+                req->data_cport, req->event);
+
+       return 0;
+}
+
+static int gbaudio_codec_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gbaudio_module_info *module =
+               greybus_get_drvdata(connection->bundle);
+       struct gb_operation_msg_hdr *header = op->request->header;
+       struct gb_audio_streaming_event_request *stream_req;
+       struct gb_audio_jack_event_request *jack_req;
+       struct gb_audio_button_event_request *button_req;
+       int ret;
+
+       switch (header->type) {
+       case GB_AUDIO_TYPE_STREAMING_EVENT:
+               stream_req = op->request->payload;
+               ret = gbaudio_request_stream(module, stream_req);
+               break;
+
+       case GB_AUDIO_TYPE_JACK_EVENT:
+               jack_req = op->request->payload;
+               ret = gbaudio_request_jack(module, jack_req);
+               break;
+
+       case GB_AUDIO_TYPE_BUTTON_EVENT:
+               button_req = op->request->payload;
+               ret = gbaudio_request_button(module, button_req);
+               break;
+
+       default:
+               dev_err_ratelimited(&connection->bundle->dev,
+                                   "Invalid Audio Event received\n");
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
+                               struct greybus_descriptor_cport *cport_desc,
+                               struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+
+       /* Management Cport */
+       if (gbmodule->mgmt_connection) {
+               dev_err(&bundle->dev,
+                       "Can't have multiple Management connections\n");
+               return -ENODEV;
+       }
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                         gbaudio_codec_request_handler);
+       if (IS_ERR(connection))
+               return PTR_ERR(connection);
+
+       greybus_set_drvdata(bundle, gbmodule);
+       gbmodule->mgmt_connection = connection;
+
+       return 0;
+}
+
+static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
+                               struct greybus_descriptor_cport *cport_desc,
+                               struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+       struct gbaudio_data_connection *dai;
+
+       dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
+       if (!dai) {
+               dev_err(gbmodule->dev, "DAI Malloc failure\n");
+               return -ENOMEM;
+       }
+
+       connection = gb_connection_create_offloaded(bundle,
+                                       le16_to_cpu(cport_desc->id),
+                                       GB_CONNECTION_FLAG_CSD);
+       if (IS_ERR(connection)) {
+               devm_kfree(gbmodule->dev, dai);
+               return PTR_ERR(connection);
+       }
+
+       greybus_set_drvdata(bundle, gbmodule);
+       dai->id = 0;
+       dai->data_cport = connection->intf_cport_id;
+       dai->connection = connection;
+       list_add(&dai->list, &gbmodule->data_list);
+
+       return 0;
+}
+
+/*
+ * This is the basic hook that gets things initialized and registered with
+ * greybus.
+ */
+
+static int gb_audio_probe(struct gb_bundle *bundle,
+                         const struct greybus_bundle_id *id)
+{
+       struct device *dev = &bundle->dev;
+       struct gbaudio_module_info *gbmodule;
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_audio_manager_module_descriptor desc;
+       struct gbaudio_data_connection *dai, *_dai;
+       int ret, i;
+       struct gb_audio_topology *topology;
+
+       /* There should be at least one Management and one Data cport */
+       if (bundle->num_cports < 2)
+               return -ENODEV;
+
+       /*
+        * There can be only one Management connection and any number of data
+        * connections.
+        */
+       gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
+       if (!gbmodule)
+               return -ENOMEM;
+
+       gbmodule->num_data_connections = bundle->num_cports - 1;
+       INIT_LIST_HEAD(&gbmodule->data_list);
+       INIT_LIST_HEAD(&gbmodule->widget_list);
+       INIT_LIST_HEAD(&gbmodule->ctl_list);
+       INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
+       gbmodule->dev = dev;
+       snprintf(gbmodule->name, NAME_SIZE, "%s.%s", dev->driver->name,
+                dev_name(dev));
+       greybus_set_drvdata(bundle, gbmodule);
+
+       /* Create all connections */
+       for (i = 0; i < bundle->num_cports; i++) {
+               cport_desc = &bundle->cport_desc[i];
+
+               switch (cport_desc->protocol_id) {
+               case GREYBUS_PROTOCOL_AUDIO_MGMT:
+                       ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
+                                                          bundle);
+                       if (ret)
+                               goto destroy_connections;
+                       break;
+               case GREYBUS_PROTOCOL_AUDIO_DATA:
+                       ret = gb_audio_add_data_connection(gbmodule, cport_desc,
+                                                          bundle);
+                       if (ret)
+                               goto destroy_connections;
+                       break;
+               default:
+                       dev_err(dev, "Unsupported protocol: 0x%02x\n",
+                               cport_desc->protocol_id);
+                       ret = -ENODEV;
+                       goto destroy_connections;
+               }
+       }
+
+       /* There must be a management cport */
+       if (!gbmodule->mgmt_connection) {
+               ret = -EINVAL;
+               dev_err(dev, "Missing management connection\n");
+               goto destroy_connections;
+       }
+
+       /* Initialize management connection */
+       ret = gb_connection_enable(gbmodule->mgmt_connection);
+       if (ret) {
+               dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
+               goto destroy_connections;
+       }
+       gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;
+
+       /*
+        * FIXME: the topology buffer is currently allocated by the audio_gb
+        * driver; it should be allocated within the codec driver itself.
+        */
+       ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
+       if (ret) {
+               dev_err(dev, "%d:Error while fetching topology\n", ret);
+               goto disable_connection;
+       }
+
+       /* process topology data */
+       ret = gbaudio_tplg_parse_data(gbmodule, topology);
+       if (ret) {
+               dev_err(dev, "%d:Error while parsing topology data\n",
+                         ret);
+               goto free_topology;
+       }
+       gbmodule->topology = topology;
+
+       /* Initialize data connections */
+       list_for_each_entry(dai, &gbmodule->data_list, list) {
+               ret = gb_connection_enable(dai->connection);
+               if (ret) {
+                       dev_err(dev,
+                               "%d:Error while enabling %d:data connection\n",
+                               ret, dai->data_cport);
+                       goto disable_data_connection;
+               }
+       }
+
+       /* register module with gbcodec */
+       ret = gbaudio_register_module(gbmodule);
+       if (ret)
+               goto disable_data_connection;
+
+       /* inform above layer for uevent */
+       dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
+       /* prepare for the audio manager */
+       strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
+       desc.slot = 1; /* todo */
+       desc.vid = 2; /* todo */
+       desc.pid = 3; /* todo */
+       desc.cport = gbmodule->dev_id;
+       desc.op_devices = gbmodule->op_devices;
+       desc.ip_devices = gbmodule->ip_devices;
+       gbmodule->manager_id = gb_audio_manager_add(&desc);
+
+       dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+disable_data_connection:
+       list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
+               gb_connection_disable(dai->connection);
+       gbaudio_tplg_release(gbmodule);
+       gbmodule->topology = NULL;
+
+free_topology:
+       kfree(topology);
+
+disable_connection:
+       gb_connection_disable(gbmodule->mgmt_connection);
+
+destroy_connections:
+       list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+               gb_connection_destroy(dai->connection);
+               list_del(&dai->list);
+               devm_kfree(dev, dai);
+       }
+
+       if (gbmodule->mgmt_connection)
+               gb_connection_destroy(gbmodule->mgmt_connection);
+
+       devm_kfree(dev, gbmodule);
+
+       return ret;
+}
+
+static void gb_audio_disconnect(struct gb_bundle *bundle)
+{
+       struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+       struct gbaudio_data_connection *dai, *_dai;
+
+       gb_pm_runtime_get_sync(bundle);
+
+       /* cleanup module related resources first */
+       gbaudio_unregister_module(gbmodule);
+
+       /* inform uevent to above layers */
+       gb_audio_manager_remove(gbmodule->manager_id);
+
+       gbaudio_tplg_release(gbmodule);
+       kfree(gbmodule->topology);
+       gbmodule->topology = NULL;
+       gb_connection_disable(gbmodule->mgmt_connection);
+       list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+               gb_connection_disable(dai->connection);
+               gb_connection_destroy(dai->connection);
+               list_del(&dai->list);
+               devm_kfree(gbmodule->dev, dai);
+       }
+       gb_connection_destroy(gbmodule->mgmt_connection);
+       gbmodule->mgmt_connection = NULL;
+
+       devm_kfree(&bundle->dev, gbmodule);
+}
+
+static const struct greybus_bundle_id gb_audio_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
+
+#ifdef CONFIG_PM
+static int gb_audio_suspend(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+       struct gbaudio_data_connection *dai;
+
+       list_for_each_entry(dai, &gbmodule->data_list, list)
+               gb_connection_disable(dai->connection);
+
+       gb_connection_disable(gbmodule->mgmt_connection);
+
+       return 0;
+}
+
+static int gb_audio_resume(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+       struct gbaudio_data_connection *dai;
+       int ret;
+
+       ret = gb_connection_enable(gbmodule->mgmt_connection);
+       if (ret) {
+               dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
+               return ret;
+       }
+
+       list_for_each_entry(dai, &gbmodule->data_list, list) {
+               ret = gb_connection_enable(dai->connection);
+               if (ret) {
+                       dev_err(dev,
+                               "%d:Error while enabling %d:data connection\n",
+                               ret, dai->data_cport);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_audio_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
+};
+
+static struct greybus_driver gb_audio_driver = {
+       .name           = "gb-audio",
+       .probe          = gb_audio_probe,
+       .disconnect     = gb_audio_disconnect,
+       .id_table       = gb_audio_id_table,
+       .driver.pm      = &gb_audio_pm_ops,
+};
+module_greybus_driver(gb_audio_driver);
+
+MODULE_DESCRIPTION("Greybus Audio module driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gbaudio-module");
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
new file mode 100644 (file)
index 0000000..5eef536
--- /dev/null
@@ -0,0 +1,1442 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "audio_codec.h"
+#include "greybus_protocols.h"
+
+#define GBAUDIO_INVALID_ID     0xFF
+
+/* mixer control */
+struct gb_mixer_control {
+       int min, max;
+       unsigned int reg, rreg, shift, rshift, invert;
+};
+
+struct gbaudio_ctl_pvt {
+       unsigned int ctl_id;
+       unsigned int data_cport;
+       unsigned int access;
+       unsigned int vcount;
+       struct gb_audio_ctl_elem_info *info;
+};
+
+static struct gbaudio_module_info *find_gb_module(
+                                       struct gbaudio_codec_info *codec,
+                                       char const *name)
+{
+       int dev_id, ret;
+       char begin[NAME_SIZE];
+       struct gbaudio_module_info *module;
+
+       if (!name)
+               return NULL;
+
+       ret = sscanf(name, "%s %d", begin, &dev_id);
+       if (ret != 2)
+               return NULL;
+
+       dev_dbg(codec->dev, "%s:Find module#%d\n", __func__, dev_id);
+
+       mutex_lock(&codec->lock);
+       list_for_each_entry(module, &codec->module_list, list) {
+               if (module->dev_id == dev_id) {
+                       mutex_unlock(&codec->lock);
+                       return module;
+               }
+       }
+       mutex_unlock(&codec->lock);
+       dev_warn(codec->dev, "%s: module#%d missing in codec list\n", name,
+                dev_id);
+       return NULL;
+}
+
+static const char *gbaudio_map_controlid(struct gbaudio_module_info *module,
+                                        __u8 control_id, __u8 index)
+{
+       struct gbaudio_control *control;
+
+       if (control_id == GBAUDIO_INVALID_ID)
+               return NULL;
+
+       list_for_each_entry(control, &module->ctl_list, list) {
+               if (control->id == control_id) {
+                       if (index == GBAUDIO_INVALID_ID)
+                               return control->name;
+                       if (index >= control->items)
+                               return NULL;
+                       return control->texts[index];
+               }
+       }
+       list_for_each_entry(control, &module->widget_ctl_list, list) {
+               if (control->id == control_id) {
+                       if (index == GBAUDIO_INVALID_ID)
+                               return control->name;
+                       if (index >= control->items)
+                               return NULL;
+                       return control->texts[index];
+               }
+       }
+       return NULL;
+}
+
+static int gbaudio_map_controlname(struct gbaudio_module_info *module,
+                                  const char *name)
+{
+       struct gbaudio_control *control;
+
+       list_for_each_entry(control, &module->ctl_list, list) {
+               if (!strncmp(control->name, name, NAME_SIZE))
+                       return control->id;
+       }
+
+       dev_warn(module->dev, "%s: missing in modules controls list\n", name);
+
+       return -EINVAL;
+}
+
+static int gbaudio_map_wcontrolname(struct gbaudio_module_info *module,
+                                   const char *name)
+{
+       struct gbaudio_control *control;
+
+       list_for_each_entry(control, &module->widget_ctl_list, list) {
+               if (!strncmp(control->wname, name, NAME_SIZE))
+                       return control->id;
+       }
+       dev_warn(module->dev, "%s: missing in modules controls list\n", name);
+
+       return -EINVAL;
+}
+
+static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
+                                 const char *name)
+{
+       struct gbaudio_widget *widget;
+
+       list_for_each_entry(widget, &module->widget_list, list) {
+               if (!strncmp(widget->name, name, NAME_SIZE))
+                       return widget->id;
+       }
+       dev_warn(module->dev, "%s: missing in modules widgets list\n", name);
+
+       return -EINVAL;
+}
+
+static const char *gbaudio_map_widgetid(struct gbaudio_module_info *module,
+                                       __u8 widget_id)
+{
+       struct gbaudio_widget *widget;
+
+       list_for_each_entry(widget, &module->widget_list, list) {
+               if (widget->id == widget_id)
+                       return widget->name;
+       }
+       return NULL;
+}
+
+static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
+                                            struct gb_audio_enumerated *gbenum)
+{
+       const char **strings;
+       int i;
+       __u8 *data;
+
+       strings = devm_kzalloc(gb->dev, sizeof(char *) * gbenum->items,
+                              GFP_KERNEL);
+       data = gbenum->names;
+
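+       /*
+        * gbenum->names holds gbenum->items consecutive NUL-terminated
+        * strings; e.g. (hypothetical) items = 2 with names = "Stereo\0Mono\0"
+        * yields strings[] = { "Stereo", "Mono" }, each entry pointing into
+        * the names buffer.
+        */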
+       for (i = 0; i < gbenum->items; i++) {
+               strings[i] = (const char *)data;
+               while (*data != '\0')
+                       data++;
+               data++;
+       }
+
+       return strings;
+}
+
+static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
+                    struct snd_ctl_elem_info *uinfo)
+{
+       unsigned int max;
+       const char *name;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_info *info;
+       struct gbaudio_module_info *module;
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+
+       if (!info) {
+               dev_err(codec->dev, "NULL info for %s\n", uinfo->id.name);
+               return -EINVAL;
+       }
+
+       /* update uinfo */
+       uinfo->access = data->access;
+       uinfo->count = data->vcount;
+       uinfo->type = (snd_ctl_elem_type_t)info->type;
+
+       switch (info->type) {
+       case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+       case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+               uinfo->value.integer.min = info->value.integer.min;
+               uinfo->value.integer.max = info->value.integer.max;
+               break;
+       case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+               max = info->value.enumerated.items;
+               uinfo->value.enumerated.items = max;
+               if (uinfo->value.enumerated.item > max - 1)
+                       uinfo->value.enumerated.item = max - 1;
+               module = find_gb_module(gbcodec, kcontrol->id.name);
+               if (!module)
+                       return -EINVAL;
+               name = gbaudio_map_controlid(module, data->ctl_id,
+                                            uinfo->value.enumerated.item);
+               strlcpy(uinfo->value.enumerated.name, name, NAME_SIZE);
+               break;
+       default:
+               dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+                       info->type, kcontrol->id.name);
+               break;
+       }
+       return 0;
+}
+
+static int gbcodec_mixer_ctl_get(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       int ret;
+       struct gb_audio_ctl_elem_info *info;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+               return ret;
+       }
+
+       /* update ucontrol */
+       switch (info->type) {
+       case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+       case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+               ucontrol->value.integer.value[0] =
+                       gbvalue.value.integer_value[0];
+               if (data->vcount == 2)
+                       ucontrol->value.integer.value[1] =
+                               gbvalue.value.integer_value[1];
+               break;
+       case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+               ucontrol->value.enumerated.item[0] =
+                       gbvalue.value.enumerated_item[0];
+               if (data->vcount == 2)
+                       ucontrol->value.enumerated.item[1] =
+                               gbvalue.value.enumerated_item[1];
+               break;
+       default:
+               dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+                       info->type, kcontrol->id.name);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int gbcodec_mixer_ctl_put(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       int ret = 0;
+       struct gb_audio_ctl_elem_info *info;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+       bundle = to_gb_bundle(module->dev);
+
+       /* update gbvalue from ucontrol */
+       switch (info->type) {
+       case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+       case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+               gbvalue.value.integer_value[0] =
+                       ucontrol->value.integer.value[0];
+               if (data->vcount == 2)
+                       gbvalue.value.integer_value[1] =
+                               ucontrol->value.integer.value[1];
+               break;
+       case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+               gbvalue.value.enumerated_item[0] =
+                       ucontrol->value.enumerated.item[0];
+               if (data->vcount == 2)
+                       gbvalue.value.enumerated_item[1] =
+                               ucontrol->value.enumerated.item[1];
+               break;
+       default:
+               dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+                       info->type, kcontrol->id.name);
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret)
+               return ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+       }
+
+       return ret;
+}
+
+#define SOC_MIXER_GB(xname, kcount, data) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+       .count = kcount, .info = gbcodec_mixer_ctl_info, \
+       .get = gbcodec_mixer_ctl_get, .put = gbcodec_mixer_ctl_put, \
+       .private_value = (unsigned long)data }
+
+/*
+ * Although the callback functions below seem redundant with the ones above,
+ * they are kept separate to allow for different handling in case of
+ * DAPM-related sequencing, etc.
+ */
+static int gbcodec_mixer_dapm_ctl_info(struct snd_kcontrol *kcontrol,
+                    struct snd_ctl_elem_info *uinfo)
+{
+       int platform_max, platform_min;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_info *info;
+       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = widget->codec;
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+
+       /* update uinfo */
+       platform_max = info->value.integer.max;
+       platform_min = info->value.integer.min;
+
+       if (platform_max == 1 &&
+           !strnstr(kcontrol->id.name, " Volume", NAME_SIZE))
+               uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+       else
+               uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+       uinfo->count = data->vcount;
+       uinfo->value.integer.min = 0;
+       if (info->value.integer.min < 0 &&
+           (uinfo->type == SNDRV_CTL_ELEM_TYPE_INTEGER))
+               uinfo->value.integer.max = platform_max - platform_min;
+       else
+               uinfo->value.integer.max = platform_max;
+
+       return 0;
+}
+
+static int gbcodec_mixer_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       int ret;
+       struct gb_audio_ctl_elem_info *info;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = widget->codec;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+       bundle = to_gb_bundle(module->dev);
+
+       if (data->vcount == 2)
+               dev_warn(widget->dapm->dev,
+                        "GB: Control '%s' is stereo, which is not supported\n",
+                        kcontrol->id.name);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+               return ret;
+       }
+       /* update ucontrol */
+       ucontrol->value.integer.value[0] = gbvalue.value.integer_value[0];
+
+       return ret;
+}
+
+static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       int ret = 0, wi, max, connect;
+       unsigned int mask, val;
+       struct gb_audio_ctl_elem_info *info;
+       struct gbaudio_ctl_pvt *data;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = widget->codec;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+       info = (struct gb_audio_ctl_elem_info *)data->info;
+       bundle = to_gb_bundle(module->dev);
+
+       if (data->vcount == 2)
+               dev_warn(widget->dapm->dev,
+                        "GB: Control '%s' is stereo, which is not supported\n",
+                        kcontrol->id.name);
+
+       max = info->value.integer.max;
+       mask = (1 << fls(max)) - 1;
+       val = (ucontrol->value.integer.value[0] & mask);
+       connect = !!val;
+
+       /* update ucontrol */
+       if (gbvalue.value.integer_value[0] != val) {
+               for (wi = 0; wi < wlist->num_widgets; wi++) {
+                       widget = wlist->widgets[wi];
+
+                       widget->value = val;
+                       widget->dapm->update = NULL;
+                       snd_soc_dapm_mixer_update_power(widget, kcontrol,
+                                                       connect);
+               }
+               gbvalue.value.integer_value[0] =
+                       ucontrol->value.integer.value[0];
+
+               ret = gb_pm_runtime_get_sync(bundle);
+               if (ret)
+                       return ret;
+
+               ret = gb_audio_gb_set_control(module->mgmt_connection,
+                                             data->ctl_id,
+                                             GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+               gb_pm_runtime_put_autosuspend(bundle);
+
+               if (ret) {
+                       dev_err_ratelimited(codec->dev,
+                                           "%d:Error in %s for %s\n", ret,
+                                           __func__, kcontrol->id.name);
+               }
+       }
+
+       return ret;
+}
+
+#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+       .count = kcount, .info = gbcodec_mixer_dapm_ctl_info, \
+       .get = gbcodec_mixer_dapm_ctl_get, .put = gbcodec_mixer_dapm_ctl_put, \
+       .private_value = (unsigned long)data}
+
+static int gbcodec_event_spk(struct snd_soc_dapm_widget *w,
+                                       struct snd_kcontrol *k, int event)
+{
+       /* Ensure GB speaker is connected */
+
+       return 0;
+}
+
+static int gbcodec_event_hp(struct snd_soc_dapm_widget *w,
+                                       struct snd_kcontrol *k, int event)
+{
+       /* Ensure GB module supports jack slot */
+
+       return 0;
+}
+
+static int gbcodec_event_int_mic(struct snd_soc_dapm_widget *w,
+                                       struct snd_kcontrol *k, int event)
+{
+       /* Ensure GB module supports jack slot */
+
+       return 0;
+}
+
+static int gbaudio_validate_kcontrol_count(struct gb_audio_widget *w)
+{
+       int ret = 0;
+
+       switch (w->type) {
+       case snd_soc_dapm_spk:
+       case snd_soc_dapm_hp:
+       case snd_soc_dapm_mic:
+       case snd_soc_dapm_output:
+       case snd_soc_dapm_input:
+               if (w->ncontrols)
+                       ret = -EINVAL;
+               break;
+       case snd_soc_dapm_switch:
+       case snd_soc_dapm_mux:
+               if (w->ncontrols != 1)
+                       ret = -EINVAL;
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int gbcodec_enum_ctl_get(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int ret, ctl_id;
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+       if (ctl_id < 0)
+               return -EINVAL;
+
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+               return ret;
+       }
+
+       ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
+       if (e->shift_l != e->shift_r)
+               ucontrol->value.enumerated.item[1] =
+                       gbvalue.value.enumerated_item[1];
+
+       return 0;
+}
+
+static int gbcodec_enum_ctl_put(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int ret, ctl_id;
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct gb_bundle *bundle;
+
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+       if (ctl_id < 0)
+               return -EINVAL;
+
+       if (ucontrol->value.enumerated.item[0] > e->max - 1)
+               return -EINVAL;
+       gbvalue.value.enumerated_item[0] = ucontrol->value.enumerated.item[0];
+
+       if (e->shift_l != e->shift_r) {
+               if (ucontrol->value.enumerated.item[1] > e->max - 1)
+                       return -EINVAL;
+               gbvalue.value.enumerated_item[1] =
+                       ucontrol->value.enumerated.item[1];
+       }
+
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+       }
+
+       return ret;
+}
+
+static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
+                                        struct snd_kcontrol_new *kctl,
+                                        struct gb_audio_control *ctl)
+{
+       struct soc_enum *gbe;
+       struct gb_audio_enumerated *gb_enum;
+       int i;
+
+       gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+       if (!gbe)
+               return -ENOMEM;
+
+       gb_enum = &ctl->info.value.enumerated;
+
+       /* since count=1, and reg is dummy */
+       gbe->max = gb_enum->items;
+       gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+
+       /* debug enum info */
+       dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
+                gb_enum->names_length);
+       for (i = 0; i < gb_enum->items; i++)
+               dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+       *kctl = (struct snd_kcontrol_new)
+               SOC_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_ctl_get,
+                            gbcodec_enum_ctl_put);
+       return 0;
+}
+
+static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
+                                       struct snd_kcontrol_new *kctl,
+                                       struct gb_audio_control *ctl)
+{
+       int ret = 0;
+       struct gbaudio_ctl_pvt *ctldata;
+
+       switch (ctl->iface) {
+       case SNDRV_CTL_ELEM_IFACE_MIXER:
+               switch (ctl->info.type) {
+               case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+                       ret = gbaudio_tplg_create_enum_kctl(gb, kctl, ctl);
+                       break;
+               default:
+                       ctldata = devm_kzalloc(gb->dev,
+                                              sizeof(struct gbaudio_ctl_pvt),
+                                              GFP_KERNEL);
+                       if (!ctldata)
+                               return -ENOMEM;
+                       ctldata->ctl_id = ctl->id;
+                       ctldata->data_cport = ctl->data_cport;
+                       ctldata->access = ctl->access;
+                       ctldata->vcount = ctl->count_values;
+                       ctldata->info = &ctl->info;
+                       *kctl = (struct snd_kcontrol_new)
+                               SOC_MIXER_GB(ctl->name, ctl->count, ctldata);
+                       ctldata = NULL;
+                       break;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(gb->dev, "%s:%d control created\n", ctl->name, ctl->id);
+       return ret;
+}
+
+static int gbcodec_enum_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       int ret, ctl_id;
+       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct gbaudio_module_info *module;
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct snd_soc_codec *codec = widget->codec;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       struct gb_bundle *bundle;
+
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+       if (ctl_id < 0)
+               return -EINVAL;
+
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+               return ret;
+       }
+
+       ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
+       if (e->shift_l != e->shift_r)
+               ucontrol->value.enumerated.item[1] =
+                       gbvalue.value.enumerated_item[1];
+
+       return 0;
+}
+
+static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       int ret, wi, ctl_id;
+       unsigned int val, mux, change;
+       unsigned int mask;
+       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct gb_audio_ctl_elem_value gbvalue;
+       struct gbaudio_module_info *module;
+       struct snd_soc_codec *codec = widget->codec;
+       struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       struct gb_bundle *bundle;
+
+       if (ucontrol->value.enumerated.item[0] > e->max - 1)
+               return -EINVAL;
+
+       module = find_gb_module(gb, kcontrol->id.name);
+       if (!module)
+               return -EINVAL;
+
+       ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+       if (ctl_id < 0)
+               return -EINVAL;
+
+       change = 0;
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+                                     GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       if (ret) {
+               dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+                                   __func__, kcontrol->id.name);
+               return ret;
+       }
+
+       mux = ucontrol->value.enumerated.item[0];
+       val = mux << e->shift_l;
+       mask = e->mask << e->shift_l;
+
+       if (gbvalue.value.enumerated_item[0] !=
+           ucontrol->value.enumerated.item[0]) {
+               change = 1;
+               gbvalue.value.enumerated_item[0] =
+                       ucontrol->value.enumerated.item[0];
+       }
+
+       if (e->shift_l != e->shift_r) {
+               if (ucontrol->value.enumerated.item[1] > e->max - 1)
+                       return -EINVAL;
+               val |= ucontrol->value.enumerated.item[1] << e->shift_r;
+               mask |= e->mask << e->shift_r;
+               if (gbvalue.value.enumerated_item[1] !=
+                   ucontrol->value.enumerated.item[1]) {
+                       change = 1;
+                       gbvalue.value.enumerated_item[1] =
+                               ucontrol->value.enumerated.item[1];
+               }
+       }
+
+       if (change) {
+               ret = gb_pm_runtime_get_sync(bundle);
+               if (ret)
+                       return ret;
+
+               ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+                                             GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+               gb_pm_runtime_put_autosuspend(bundle);
+
+               if (ret) {
+                       dev_err_ratelimited(codec->dev,
+                                           "%d:Error in %s for %s\n", ret,
+                                           __func__, kcontrol->id.name);
+               }
+               for (wi = 0; wi < wlist->num_widgets; wi++) {
+                       widget = wlist->widgets[wi];
+
+                       widget->value = val;
+                       widget->dapm->update = NULL;
+                       snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+               }
+       }
+
+       return change;
+}
+
+static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
+                                       struct snd_kcontrol_new *kctl,
+                                       struct gb_audio_control *ctl)
+{
+       struct soc_enum *gbe;
+       struct gb_audio_enumerated *gb_enum;
+       int i;
+
+       gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+       if (!gbe)
+               return -ENOMEM;
+
+       gb_enum = &ctl->info.value.enumerated;
+
+       /* since count=1, and reg is dummy */
+       gbe->max = gb_enum->items;
+       gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+
+       /* debug enum info */
+       dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
+                gb_enum->names_length);
+       for (i = 0; i < gb_enum->items; i++)
+               dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+       *kctl = (struct snd_kcontrol_new)
+               SOC_DAPM_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_dapm_ctl_get,
+                                 gbcodec_enum_dapm_ctl_put);
+       return 0;
+}
+
+static int gbaudio_tplg_create_mixer_ctl(struct gbaudio_module_info *gb,
+                                            struct snd_kcontrol_new *kctl,
+                                            struct gb_audio_control *ctl)
+{
+       struct gbaudio_ctl_pvt *ctldata;
+
+       ctldata = devm_kzalloc(gb->dev, sizeof(struct gbaudio_ctl_pvt),
+                              GFP_KERNEL);
+       if (!ctldata)
+               return -ENOMEM;
+       ctldata->ctl_id = ctl->id;
+       ctldata->data_cport = ctl->data_cport;
+       ctldata->access = ctl->access;
+       ctldata->vcount = ctl->count_values;
+       ctldata->info = &ctl->info;
+       *kctl = (struct snd_kcontrol_new)
+               SOC_DAPM_MIXER_GB(ctl->name, ctl->count, ctldata);
+
+       return 0;
+}
+
+static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
+                                            struct snd_kcontrol_new *kctl,
+                                            struct gb_audio_control *ctl)
+{
+       int ret;
+
+       switch (ctl->iface) {
+       case SNDRV_CTL_ELEM_IFACE_MIXER:
+               switch (ctl->info.type) {
+               case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+                       ret = gbaudio_tplg_create_enum_ctl(gb, kctl, ctl);
+                       break;
+               default:
+                       ret = gbaudio_tplg_create_mixer_ctl(gb, kctl, ctl);
+                       break;
+               }
+               break;
+       default:
+               return -EINVAL;
+
+       }
+
+       dev_dbg(gb->dev, "%s:%d DAPM control created, ret:%d\n", ctl->name,
+               ctl->id, ret);
+       return ret;
+}
+
+static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
+                               struct snd_kcontrol *kcontrol, int event)
+{
+       int wid;
+       int ret;
+       struct snd_soc_codec *codec = w->codec;
+       struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
+       struct gbaudio_module_info *module;
+       struct gb_bundle *bundle;
+
+       dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+       /* Find relevant module */
+       module = find_gb_module(gbcodec, w->name);
+       if (!module)
+               return -EINVAL;
+
+       /* map name to widget id */
+       wid = gbaudio_map_widgetname(module, w->name);
+       if (wid < 0) {
+               dev_err(codec->dev, "Invalid widget name:%s\n", w->name);
+               return -EINVAL;
+       }
+
+       bundle = to_gb_bundle(module->dev);
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               ret = gb_audio_gb_enable_widget(module->mgmt_connection, wid);
+               if (!ret)
+                       ret = gbaudio_module_update(gbcodec, w, module, 1);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               ret = gb_audio_gb_disable_widget(module->mgmt_connection, wid);
+               if (!ret)
+                       ret = gbaudio_module_update(gbcodec, w, module, 0);
+               break;
+       }
+       if (ret)
+               dev_err_ratelimited(codec->dev,
+                                   "%d: widget, event:%d failed:%d\n", wid,
+                                   event, ret);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
+                                     struct snd_soc_dapm_widget *dw,
+                                     struct gb_audio_widget *w, int *w_size)
+{
+       int i, ret, csize;
+       struct snd_kcontrol_new *widget_kctls;
+       struct gb_audio_control *curr;
+       struct gbaudio_control *control, *_control;
+       size_t size;
+       char temp_name[NAME_SIZE];
+
+       ret = gbaudio_validate_kcontrol_count(w);
+       if (ret) {
+               dev_err(module->dev, "Invalid kcontrol count=%d for %s\n",
+                       w->ncontrols, w->name);
+               return ret;
+       }
+
+       /* allocate memory for kcontrol */
+       if (w->ncontrols) {
+               size = sizeof(struct snd_kcontrol_new) * w->ncontrols;
+               widget_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+               if (!widget_kctls)
+                       return -ENOMEM;
+       }
+
+       *w_size = sizeof(struct gb_audio_widget);
+
+       /* create relevant kcontrols */
+       curr = w->ctl;
+       for (i = 0; i < w->ncontrols; i++) {
+               ret = gbaudio_tplg_create_wcontrol(module, &widget_kctls[i],
+                                                  curr);
+               if (ret) {
+                       dev_err(module->dev,
+                               "%s:%d type widget_ctl not supported\n",
+                               curr->name, curr->iface);
+                       goto error;
+               }
+               control = devm_kzalloc(module->dev,
+                                      sizeof(struct gbaudio_control),
+                                      GFP_KERNEL);
+               if (!control) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               control->id = curr->id;
+               control->name = curr->name;
+               control->wname = w->name;
+
+               if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+                       struct gb_audio_enumerated *gbenum =
+                               &curr->info.value.enumerated;
+
+                       csize = offsetof(struct gb_audio_control, info);
+                       csize += offsetof(struct gb_audio_ctl_elem_info, value);
+                       csize += offsetof(struct gb_audio_enumerated, names);
+                       csize += gbenum->names_length;
+                       control->texts = (const char * const *)
+                               gb_generate_enum_strings(module, gbenum);
+                       control->items = gbenum->items;
+               } else
+                       csize = sizeof(struct gb_audio_control);
+               *w_size += csize;
+               curr = (void *)curr + csize;
+               list_add(&control->list, &module->widget_ctl_list);
+               dev_dbg(module->dev, "%s: control of type %d created\n",
+                       widget_kctls[i].name, widget_kctls[i].iface);
+       }
+
+       /* Prefix dev_id to the widget name */
+       strlcpy(temp_name, w->name, NAME_SIZE);
+       snprintf(w->name, NAME_SIZE, "GB %d %s", module->dev_id, temp_name);
+
+       switch (w->type) {
+       case snd_soc_dapm_spk:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk);
+               module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
+               break;
+       case snd_soc_dapm_hp:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_HP(w->name, gbcodec_event_hp);
+               module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
+                                       | GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
+               module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
+               break;
+       case snd_soc_dapm_mic:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic);
+               module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
+               break;
+       case snd_soc_dapm_output:
+               *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name);
+               break;
+       case snd_soc_dapm_input:
+               *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name);
+               break;
+       case snd_soc_dapm_switch:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0,
+                                           widget_kctls, gbaudio_widget_event,
+                                           SND_SOC_DAPM_PRE_PMU |
+                                           SND_SOC_DAPM_POST_PMD);
+               break;
+       case snd_soc_dapm_pga:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0,
+                                          gbaudio_widget_event,
+                                          SND_SOC_DAPM_PRE_PMU |
+                                          SND_SOC_DAPM_POST_PMD);
+               break;
+       case snd_soc_dapm_mixer:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL,
+                                          0, gbaudio_widget_event,
+                                          SND_SOC_DAPM_PRE_PMU |
+                                          SND_SOC_DAPM_POST_PMD);
+               break;
+       case snd_soc_dapm_mux:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0,
+                                        widget_kctls, gbaudio_widget_event,
+                                        SND_SOC_DAPM_PRE_PMU |
+                                        SND_SOC_DAPM_POST_PMD);
+               break;
+       case snd_soc_dapm_aif_in:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0,
+                                             SND_SOC_NOPM,
+                                             0, 0, gbaudio_widget_event,
+                                             SND_SOC_DAPM_PRE_PMU |
+                                             SND_SOC_DAPM_POST_PMD);
+               break;
+       case snd_soc_dapm_aif_out:
+               *dw = (struct snd_soc_dapm_widget)
+                       SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0,
+                                              SND_SOC_NOPM,
+                                              0, 0, gbaudio_widget_event,
+                                              SND_SOC_DAPM_PRE_PMU |
+                                              SND_SOC_DAPM_POST_PMD);
+               break;
+       default:
+               ret = -EINVAL;
+               goto error;
+       }
+
+       dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
+               dw->id);
+       return 0;
+error:
+       list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+                                list) {
+               list_del(&control->list);
+               devm_kfree(module->dev, control);
+       }
+       return ret;
+}
+
+static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
+                                  struct gb_audio_control *controls)
+{
+       int i, csize, ret;
+       struct snd_kcontrol_new *dapm_kctls;
+       struct gb_audio_control *curr;
+       struct gbaudio_control *control, *_control;
+       size_t size;
+       char temp_name[NAME_SIZE];
+
+       size = sizeof(struct snd_kcontrol_new) * module->num_controls;
+       dapm_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+       if (!dapm_kctls)
+               return -ENOMEM;
+
+       curr = controls;
+       for (i = 0; i < module->num_controls; i++) {
+               ret = gbaudio_tplg_create_kcontrol(module, &dapm_kctls[i],
+                                                  curr);
+               if (ret) {
+                       dev_err(module->dev, "%s:%d type not supported\n",
+                               curr->name, curr->iface);
+                       goto error;
+               }
+               control = devm_kzalloc(module->dev,
+                                      sizeof(struct gbaudio_control),
+                                      GFP_KERNEL);
+               if (!control) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               control->id = curr->id;
+               /* Prefix dev_id to the control name */
+               strlcpy(temp_name, curr->name, NAME_SIZE);
+               snprintf(curr->name, NAME_SIZE, "GB %d %s", module->dev_id,
+                        temp_name);
+               control->name = curr->name;
+               if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+                       struct gb_audio_enumerated *gbenum =
+                               &curr->info.value.enumerated;
+
+                       csize = offsetof(struct gb_audio_control, info);
+                       csize += offsetof(struct gb_audio_ctl_elem_info, value);
+                       csize += offsetof(struct gb_audio_enumerated, names);
+                       csize += gbenum->names_length;
+                       control->texts = (const char * const *)
+                               gb_generate_enum_strings(module, gbenum);
+                       control->items = gbenum->items;
+               } else
+                       csize = sizeof(struct gb_audio_control);
+
+               list_add(&control->list, &module->ctl_list);
+               dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
+                       curr->name, curr->info.type);
+               curr = (void *)curr + csize;
+       }
+       module->controls = dapm_kctls;
+
+       return 0;
+error:
+       list_for_each_entry_safe(control, _control, &module->ctl_list,
+                                list) {
+               list_del(&control->list);
+               devm_kfree(module->dev, control);
+       }
+       devm_kfree(module->dev, dapm_kctls);
+       return ret;
+}
+
+static int gbaudio_tplg_process_widgets(struct gbaudio_module_info *module,
+                                  struct gb_audio_widget *widgets)
+{
+       int i, ret, w_size;
+       struct snd_soc_dapm_widget *dapm_widgets;
+       struct gb_audio_widget *curr;
+       struct gbaudio_widget *widget, *_widget;
+       size_t size;
+
+       size = sizeof(struct snd_soc_dapm_widget) * module->num_dapm_widgets;
+       dapm_widgets = devm_kzalloc(module->dev, size, GFP_KERNEL);
+       if (!dapm_widgets)
+               return -ENOMEM;
+
+       curr = widgets;
+       for (i = 0; i < module->num_dapm_widgets; i++) {
+               ret = gbaudio_tplg_create_widget(module, &dapm_widgets[i],
+                                                curr, &w_size);
+               if (ret) {
+                       dev_err(module->dev, "%s:%d type not supported\n",
+                               curr->name, curr->type);
+                       goto error;
+               }
+               widget = devm_kzalloc(module->dev,
+                                     sizeof(struct gbaudio_widget),
+                                     GFP_KERNEL);
+               if (!widget) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               widget->id = curr->id;
+               widget->name = curr->name;
+               list_add(&widget->list, &module->widget_list);
+               curr = (void *)curr + w_size;
+       }
+       module->dapm_widgets = dapm_widgets;
+
+       return 0;
+
+error:
+       list_for_each_entry_safe(widget, _widget, &module->widget_list,
+                                list) {
+               list_del(&widget->list);
+               devm_kfree(module->dev, widget);
+       }
+       devm_kfree(module->dev, dapm_widgets);
+       return ret;
+}
+
+static int gbaudio_tplg_process_routes(struct gbaudio_module_info *module,
+                                  struct gb_audio_route *routes)
+{
+       int i, ret;
+       struct snd_soc_dapm_route *dapm_routes;
+       struct gb_audio_route *curr;
+       size_t size;
+
+       size = sizeof(struct snd_soc_dapm_route) * module->num_dapm_routes;
+       dapm_routes = devm_kzalloc(module->dev, size, GFP_KERNEL);
+       if (!dapm_routes)
+               return -ENOMEM;
+
+       module->dapm_routes = dapm_routes;
+       curr = routes;
+
+       for (i = 0; i < module->num_dapm_routes; i++) {
+               dapm_routes->sink =
+                       gbaudio_map_widgetid(module, curr->destination_id);
+               if (!dapm_routes->sink) {
+                       dev_err(module->dev, "%d:%d:%d:%d - Invalid sink\n",
+                               curr->source_id, curr->destination_id,
+                               curr->control_id, curr->index);
+                       ret = -EINVAL;
+                       goto error;
+               }
+               dapm_routes->source =
+                       gbaudio_map_widgetid(module, curr->source_id);
+               if (!dapm_routes->source) {
+                       dev_err(module->dev, "%d:%d:%d:%d - Invalid source\n",
+                               curr->source_id, curr->destination_id,
+                               curr->control_id, curr->index);
+                       ret = -EINVAL;
+                       goto error;
+               }
+               dapm_routes->control =
+                       gbaudio_map_controlid(module,
+                                             curr->control_id,
+                                             curr->index);
+               if ((curr->control_id != GBAUDIO_INVALID_ID) &&
+                   !dapm_routes->control) {
+                       dev_err(module->dev, "%d:%d:%d:%d - Invalid control\n",
+                               curr->source_id, curr->destination_id,
+                               curr->control_id, curr->index);
+                       ret = -EINVAL;
+                       goto error;
+               }
+               dev_dbg(module->dev, "Route {%s, %s, %s}\n", dapm_routes->sink,
+                       dapm_routes->control ? dapm_routes->control : "NULL",
+                       dapm_routes->source);
+               dapm_routes++;
+               curr++;
+       }
+
+       return 0;
+
+error:
+       devm_kfree(module->dev, module->dapm_routes);
+       return ret;
+}
+
+static int gbaudio_tplg_process_header(struct gbaudio_module_info *module,
+                                struct gb_audio_topology *tplg_data)
+{
+       /* fetch no. of kcontrols, widgets & routes */
+       module->num_controls = tplg_data->num_controls;
+       module->num_dapm_widgets = tplg_data->num_widgets;
+       module->num_dapm_routes = tplg_data->num_routes;
+
+       /* update block offset */
+       module->dai_offset = (unsigned long)&tplg_data->data;
+       module->control_offset = module->dai_offset + tplg_data->size_dais;
+       module->widget_offset = module->control_offset +
+               tplg_data->size_controls;
+       module->route_offset = module->widget_offset +
+               tplg_data->size_widgets;
+
+       dev_dbg(module->dev, "DAI offset is 0x%lx\n", module->dai_offset);
+       dev_dbg(module->dev, "control offset is 0x%lx\n",
+               module->control_offset);
+       dev_dbg(module->dev, "widget offset is 0x%lx\n", module->widget_offset);
+       dev_dbg(module->dev, "route offset is 0x%lx\n", module->route_offset);
+
+       return 0;
+}
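The offsets computed above assume the topology blob packs its sections back to back after the header; a sketch of the implied layout (field names as in struct gb_audio_topology, all sizes in bytes):

    tplg_data->data + 0                                          : DAI descriptors (size_dais)
    tplg_data->data + size_dais                                  : controls        (size_controls)
    tplg_data->data + size_dais + size_controls                  : widgets         (size_widgets)
    tplg_data->data + size_dais + size_controls + size_widgets   : routes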
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+                              struct gb_audio_topology *tplg_data)
+{
+       int ret;
+       struct gb_audio_control *controls;
+       struct gb_audio_widget *widgets;
+       struct gb_audio_route *routes;
+
+       if (!tplg_data)
+               return -EINVAL;
+
+       ret = gbaudio_tplg_process_header(module, tplg_data);
+       if (ret) {
+               dev_err(module->dev, "%d: Error in parsing topology header\n",
+                       ret);
+               return ret;
+       }
+
+       /* process control */
+       controls = (struct gb_audio_control *)module->control_offset;
+       ret = gbaudio_tplg_process_kcontrols(module, controls);
+       if (ret) {
+               dev_err(module->dev,
+                       "%d: Error in parsing controls data\n", ret);
+               return ret;
+       }
+       dev_dbg(module->dev, "Control parsing finished\n");
+
+       /* process widgets */
+       widgets = (struct gb_audio_widget *)module->widget_offset;
+       ret = gbaudio_tplg_process_widgets(module, widgets);
+       if (ret) {
+               dev_err(module->dev,
+                       "%d: Error in parsing widgets data\n", ret);
+               return ret;
+       }
+       dev_dbg(module->dev, "Widget parsing finished\n");
+
+       /* process route */
+       routes = (struct gb_audio_route *)module->route_offset;
+       ret = gbaudio_tplg_process_routes(module, routes);
+       if (ret) {
+               dev_err(module->dev,
+                       "%d: Error in parsing routes data\n", ret);
+               return ret;
+       }
+       dev_dbg(module->dev, "Route parsing finished\n");
+
+       /* parse jack capabilities */
+       if (tplg_data->jack_type) {
+               module->jack_mask = tplg_data->jack_type & GBCODEC_JACK_MASK;
+               module->button_mask = tplg_data->jack_type &
+                       GBCODEC_JACK_BUTTON_MASK;
+       }
+
+       return ret;
+}
+
+void gbaudio_tplg_release(struct gbaudio_module_info *module)
+{
+       struct gbaudio_control *control, *_control;
+       struct gbaudio_widget *widget, *_widget;
+
+       if (!module->topology)
+               return;
+
+       /* release kcontrols */
+       list_for_each_entry_safe(control, _control, &module->ctl_list,
+                                list) {
+               list_del(&control->list);
+               devm_kfree(module->dev, control);
+       }
+       if (module->controls)
+               devm_kfree(module->dev, module->controls);
+
+       /* release widget controls */
+       list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+                                list) {
+               list_del(&control->list);
+               devm_kfree(module->dev, control);
+       }
+
+       /* release widgets */
+       list_for_each_entry_safe(widget, _widget, &module->widget_list,
+                                list) {
+               list_del(&widget->list);
+               devm_kfree(module->dev, widget);
+       }
+       if (module->dapm_widgets)
+               devm_kfree(module->dev, module->dapm_widgets);
+
+       /* release routes */
+       if (module->dapm_routes)
+               devm_kfree(module->dev, module->dapm_routes);
+}
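The two exported entry points above are intended to be called symmetrically by the codec module code; a minimal sketch of a caller, with the ASoC registration step only indicated (it is not part of this file):

    /* sketch: parse the module topology once, release it again on removal */
    ret = gbaudio_tplg_parse_data(module, topology);
    if (ret)
            return ret;
    /* ... register module->dapm_widgets, module->controls and
     * module->dapm_routes with the ASoC core ... */
    gbaudio_tplg_release(module);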
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
new file mode 100644 (file)
index 0000000..168626b
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Greybus Component Authentication Protocol (CAP) Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "greybus_authentication.h"
+#include "firmware.h"
+#include "greybus.h"
+
+#define CAP_TIMEOUT_MS         1000
+
+/*
+ * Number of minor devices this driver supports.
+ * There will be exactly one required per Interface.
+ */
+#define NUM_MINORS             U8_MAX
+
+struct gb_cap {
+       struct device           *parent;
+       struct gb_connection    *connection;
+       struct kref             kref;
+       struct list_head        node;
+       bool                    disabled; /* connection getting disabled */
+
+       struct mutex            mutex;
+       struct cdev             cdev;
+       struct device           *class_device;
+       dev_t                   dev_num;
+};
+
+static struct class *cap_class;
+static dev_t cap_dev_num;
+static DEFINE_IDA(cap_minors_map);
+static LIST_HEAD(cap_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void cap_kref_release(struct kref *kref)
+{
+       struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
+
+       kfree(cap);
+}
+
+/*
+ * All users of cap take a reference (while holding list_mutex) before they
+ * get a pointer to play with. The structure is freed only after the last
+ * user has dropped its reference.
+ */
+static void put_cap(struct gb_cap *cap)
+{
+       kref_put(&cap->kref, cap_kref_release);
+}
+
+/* Caller must call put_cap() after using struct gb_cap */
+static struct gb_cap *get_cap(struct cdev *cdev)
+{
+       struct gb_cap *cap;
+
+       mutex_lock(&list_mutex);
+
+       list_for_each_entry(cap, &cap_list, node) {
+               if (&cap->cdev == cdev) {
+                       kref_get(&cap->kref);
+                       goto unlock;
+               }
+       }
+
+       cap = NULL;
+
+unlock:
+       mutex_unlock(&list_mutex);
+
+       return cap;
+}
+
+static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
+{
+       struct gb_connection *connection = cap->connection;
+       struct gb_cap_get_endpoint_uid_response response;
+       int ret;
+
+       ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
+                               0, &response, sizeof(response));
+       if (ret) {
+               dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
+               return ret;
+       }
+
+       memcpy(euid, response.uid, sizeof(response.uid));
+
+       return 0;
+}
+
+static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
+                                  u8 *certificate, u32 *size, u8 *result)
+{
+       struct gb_connection *connection = cap->connection;
+       struct gb_cap_get_ims_certificate_request *request;
+       struct gb_cap_get_ims_certificate_response *response;
+       size_t max_size = gb_operation_get_payload_size_max(connection);
+       struct gb_operation *op;
+       int ret;
+
+       op = gb_operation_create_flags(connection,
+                                      GB_CAP_TYPE_GET_IMS_CERTIFICATE,
+                                      sizeof(*request), max_size,
+                                      GB_OPERATION_FLAG_SHORT_RESPONSE,
+                                      GFP_KERNEL);
+       if (!op)
+               return -ENOMEM;
+
+       request = op->request->payload;
+       request->certificate_class = cpu_to_le32(class);
+       request->certificate_id = cpu_to_le32(id);
+
+       ret = gb_operation_request_send_sync(op);
+       if (ret) {
+               dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
+               goto done;
+       }
+
+       response = op->response->payload;
+       *result = response->result_code;
+       *size = op->response->payload_size - sizeof(*response);
+       memcpy(certificate, response->certificate, *size);
+
+done:
+       gb_operation_put(op);
+       return ret;
+}
+
+static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
+                           u8 *challenge, u8 *result, u8 *auth_response,
+                           u32 *signature_size, u8 *signature)
+{
+       struct gb_connection *connection = cap->connection;
+       struct gb_cap_authenticate_request *request;
+       struct gb_cap_authenticate_response *response;
+       size_t max_size = gb_operation_get_payload_size_max(connection);
+       struct gb_operation *op;
+       int ret;
+
+       op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
+                                      sizeof(*request), max_size,
+                                      GB_OPERATION_FLAG_SHORT_RESPONSE,
+                                      GFP_KERNEL);
+       if (!op)
+               return -ENOMEM;
+
+       request = op->request->payload;
+       request->auth_type = cpu_to_le32(auth_type);
+       memcpy(request->uid, uid, sizeof(request->uid));
+       memcpy(request->challenge, challenge, sizeof(request->challenge));
+
+       ret = gb_operation_request_send_sync(op);
+       if (ret) {
+               dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
+               goto done;
+       }
+
+       response = op->response->payload;
+       *result = response->result_code;
+       *signature_size = op->response->payload_size - sizeof(*response);
+       memcpy(auth_response, response->response, sizeof(response->response));
+       memcpy(signature, response->signature, *signature_size);
+
+done:
+       gb_operation_put(op);
+       return ret;
+}
+
+/* Char device fops */
+
+static int cap_open(struct inode *inode, struct file *file)
+{
+       struct gb_cap *cap = get_cap(inode->i_cdev);
+
+       /* cap structure can't get freed until file descriptor is closed */
+       if (cap) {
+               file->private_data = cap;
+               return 0;
+       }
+
+       return -ENODEV;
+}
+
+static int cap_release(struct inode *inode, struct file *file)
+{
+       struct gb_cap *cap = file->private_data;
+
+       put_cap(cap);
+       return 0;
+}
+
+static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
+                        void __user *buf)
+{
+       struct cap_ioc_get_endpoint_uid endpoint_uid;
+       struct cap_ioc_get_ims_certificate *ims_cert;
+       struct cap_ioc_authenticate *authenticate;
+       size_t size;
+       int ret;
+
+       switch (cmd) {
+       case CAP_IOC_GET_ENDPOINT_UID:
+               ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
+               if (ret)
+                       return ret;
+
+               if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
+                       return -EFAULT;
+
+               return 0;
+       case CAP_IOC_GET_IMS_CERTIFICATE:
+               size = sizeof(*ims_cert);
+               ims_cert = memdup_user(buf, size);
+               if (IS_ERR(ims_cert))
+                       return PTR_ERR(ims_cert);
+
+               ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
+                                             ims_cert->certificate_id,
+                                             ims_cert->certificate,
+                                             &ims_cert->cert_size,
+                                             &ims_cert->result_code);
+               if (!ret && copy_to_user(buf, ims_cert, size))
+                       ret = -EFAULT;
+               kfree(ims_cert);
+
+               return ret;
+       case CAP_IOC_AUTHENTICATE:
+               size = sizeof(*authenticate);
+               authenticate = memdup_user(buf, size);
+               if (IS_ERR(authenticate))
+                       return PTR_ERR(authenticate);
+
+               ret = cap_authenticate(cap, authenticate->auth_type,
+                                      authenticate->uid,
+                                      authenticate->challenge,
+                                      &authenticate->result_code,
+                                      authenticate->response,
+                                      &authenticate->signature_size,
+                                      authenticate->signature);
+               if (!ret && copy_to_user(buf, authenticate, size))
+                       ret = -EFAULT;
+               kfree(authenticate);
+
+               return ret;
+       default:
+               return -ENOTTY;
+       }
+}
+
+static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
+                              unsigned long arg)
+{
+       struct gb_cap *cap = file->private_data;
+       struct gb_bundle *bundle = cap->connection->bundle;
+       int ret = -ENODEV;
+
+       /*
+        * Serialize ioctls.
+        *
+        * We don't want the user to do multiple authentication operations in
+        * parallel.
+        *
+        * This is also used to protect ->disabled, which is used to check if
+        * the connection is getting disconnected, so that we don't start any
+        * new operations.
+        */
+       mutex_lock(&cap->mutex);
+       if (!cap->disabled) {
+               ret = gb_pm_runtime_get_sync(bundle);
+               if (!ret) {
+                       ret = cap_ioctl(cap, cmd, (void __user *)arg);
+                       gb_pm_runtime_put_autosuspend(bundle);
+               }
+       }
+       mutex_unlock(&cap->mutex);
+
+       return ret;
+}
+
+static const struct file_operations cap_fops = {
+       .owner          = THIS_MODULE,
+       .open           = cap_open,
+       .release        = cap_release,
+       .unlocked_ioctl = cap_ioctl_unlocked,
+};
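User space drives this character device purely through the three ioctls handled above. A minimal sketch of a caller (struct cap_ioc_get_endpoint_uid and CAP_IOC_GET_ENDPOINT_UID come from the greybus_authentication.h header included above; the /dev path assumes the usual udev naming for minor 0):

    /* sketch: read the Endpoint UID over the CAP character device */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "greybus_authentication.h"

    int main(void)
    {
            struct cap_ioc_get_endpoint_uid euid = { 0 };
            int fd = open("/dev/gb-authenticate-0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid) == 0)
                    printf("endpoint uid byte 0: 0x%02x\n", euid.uid[0]);
            close(fd);
            return 0;
    }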
+
+int gb_cap_connection_init(struct gb_connection *connection)
+{
+       struct gb_cap *cap;
+       int ret, minor;
+
+       if (!connection)
+               return 0;
+
+       cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+       if (!cap)
+               return -ENOMEM;
+
+       cap->parent = &connection->bundle->dev;
+       cap->connection = connection;
+       mutex_init(&cap->mutex);
+       gb_connection_set_data(connection, cap);
+       kref_init(&cap->kref);
+
+       mutex_lock(&list_mutex);
+       list_add(&cap->node, &cap_list);
+       mutex_unlock(&list_mutex);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto err_list_del;
+
+       minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+       if (minor < 0) {
+               ret = minor;
+               goto err_connection_disable;
+       }
+
+       /* Add a char device to allow userspace to interact with cap */
+       cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
+       cdev_init(&cap->cdev, &cap_fops);
+
+       ret = cdev_add(&cap->cdev, cap->dev_num, 1);
+       if (ret)
+               goto err_remove_ida;
+
+       /* Add a soft link to the previously added char-dev within the bundle */
+       cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
+                                         NULL, "gb-authenticate-%d", minor);
+       if (IS_ERR(cap->class_device)) {
+               ret = PTR_ERR(cap->class_device);
+               goto err_del_cdev;
+       }
+
+       return 0;
+
+err_del_cdev:
+       cdev_del(&cap->cdev);
+err_remove_ida:
+       ida_simple_remove(&cap_minors_map, minor);
+err_connection_disable:
+       gb_connection_disable(connection);
+err_list_del:
+       mutex_lock(&list_mutex);
+       list_del(&cap->node);
+       mutex_unlock(&list_mutex);
+
+       put_cap(cap);
+
+       return ret;
+}
+
+void gb_cap_connection_exit(struct gb_connection *connection)
+{
+       struct gb_cap *cap;
+
+       if (!connection)
+               return;
+
+       cap = gb_connection_get_data(connection);
+
+       device_destroy(cap_class, cap->dev_num);
+       cdev_del(&cap->cdev);
+       ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
+
+       /*
+        * Disallow any new ioctl operations on the char device and wait for
+        * existing ones to finish.
+        */
+       mutex_lock(&cap->mutex);
+       cap->disabled = true;
+       mutex_unlock(&cap->mutex);
+
+       /* All pending greybus operations should have finished by now */
+       gb_connection_disable(cap->connection);
+
+       /* Disallow new users to get access to the cap structure */
+       mutex_lock(&list_mutex);
+       list_del(&cap->node);
+       mutex_unlock(&list_mutex);
+
+       /*
+        * All current users of cap have already taken a reference to it by
+        * now, so we can drop our reference; the structure will be freed
+        * once the last user drops its reference.
+        */
+       put_cap(cap);
+}
+
+int cap_init(void)
+{
+       int ret;
+
+       cap_class = class_create(THIS_MODULE, "gb_authenticate");
+       if (IS_ERR(cap_class))
+               return PTR_ERR(cap_class);
+
+       ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
+                                 "gb_authenticate");
+       if (ret)
+               goto err_remove_class;
+
+       return 0;
+
+err_remove_class:
+       class_destroy(cap_class);
+       return ret;
+}
+
+void cap_exit(void)
+{
+       unregister_chrdev_region(cap_dev_num, NUM_MINORS);
+       class_destroy(cap_class);
+       ida_destroy(&cap_minors_map);
+}
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
new file mode 100644 (file)
index 0000000..5f90721
--- /dev/null
@@ -0,0 +1,524 @@
+/*
+ * BOOTROM Greybus driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "firmware.h"
+
+/* Timeout, in milliseconds, within which the next request must arrive */
+#define NEXT_REQ_TIMEOUT_MS    1000
+
+/*
+ * FIXME: Reduce this timeout once svc core handles parallel processing of
+ * events from the SVC, which are handled sequentially today.
+ */
+#define MODE_SWITCH_TIMEOUT_MS 10000
+
+enum next_request_type {
+       NEXT_REQ_FIRMWARE_SIZE,
+       NEXT_REQ_GET_FIRMWARE,
+       NEXT_REQ_READY_TO_BOOT,
+       NEXT_REQ_MODE_SWITCH,
+};
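These states mirror the request sequence expected from the module: each incoming request cancels the pending timeout and arms the next one, as implemented in the handlers below. Roughly:

    AP_READY sent -> firmware_size -> get_firmware (once per chunk) -> ready_to_boot -> mode switch / new hotplug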
+
+struct gb_bootrom {
+       struct gb_connection    *connection;
+       const struct firmware   *fw;
+       u8                      protocol_major;
+       u8                      protocol_minor;
+       enum next_request_type  next_request;
+       struct delayed_work     dwork;
+       struct mutex            mutex; /* Protects bootrom->fw */
+};
+
+static void free_firmware(struct gb_bootrom *bootrom)
+{
+       if (!bootrom->fw)
+               return;
+
+       release_firmware(bootrom->fw);
+       bootrom->fw = NULL;
+}
+
+static void gb_bootrom_timedout(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork);
+       struct device *dev = &bootrom->connection->bundle->dev;
+       const char *reason;
+
+       switch (bootrom->next_request) {
+       case NEXT_REQ_FIRMWARE_SIZE:
+               reason = "Firmware Size Request";
+               break;
+       case NEXT_REQ_GET_FIRMWARE:
+               reason = "Get Firmware Request";
+               break;
+       case NEXT_REQ_READY_TO_BOOT:
+               reason = "Ready to Boot Request";
+               break;
+       case NEXT_REQ_MODE_SWITCH:
+               reason = "Interface Mode Switch";
+               break;
+       default:
+               reason = NULL;
+               dev_err(dev, "Invalid next-request: %u\n", bootrom->next_request);
+               break;
+       }
+
+       dev_err(dev, "Timed out waiting for %s from the Module\n", reason);
+
+       mutex_lock(&bootrom->mutex);
+       free_firmware(bootrom);
+       mutex_unlock(&bootrom->mutex);
+
+       /* TODO: Power-off Module ? */
+}
+
+static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
+                       enum next_request_type next, unsigned long timeout)
+{
+       bootrom->next_request = next;
+       schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
+}
+
+static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
+{
+       cancel_delayed_work_sync(&bootrom->dwork);
+}
+
+/*
+ * The es2 chip doesn't have VID/PID programmed into the hardware and we need to
+ * hack that up to distinguish different modules and their firmware blobs.
+ *
+ * This fetches the VID/PID (over the bootrom protocol) for the es2 chip
+ * only, when the VID/PID sent during hotplug are 0.
+ *
+ * Otherwise, intf->vendor_id/product_id are kept the same as what was
+ * passed during hotplug.
+ */
+static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
+{
+       struct gb_bootrom_get_vid_pid_response response;
+       struct gb_connection *connection = bootrom->connection;
+       struct gb_interface *intf = connection->bundle->intf;
+       int ret;
+
+       if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
+               return;
+
+       ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
+                               NULL, 0, &response, sizeof(response));
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                       "Bootrom get vid/pid operation failed (%d)\n", ret);
+               return;
+       }
+
+       /*
+        * NOTE: This is hacked, so that the same values of VID/PID can be used
+        * by the next firmware stage as well. The uevent for bootrom will still
+        * have VID/PID as 0, though after this point the sysfs files will start
+        * showing the updated values. But yeah, that's a bit racy as the same
+        * sysfs files would be showing 0 before this point.
+        */
+       intf->vendor_id = le32_to_cpu(response.vendor_id);
+       intf->product_id = le32_to_cpu(response.product_id);
+
+       dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
+               intf->vendor_id, intf->product_id);
+}
+
+/* Look up and load the firmware blob for this interface from disk */
+static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
+{
+       struct gb_connection *connection = bootrom->connection;
+       struct gb_interface *intf = connection->bundle->intf;
+       char firmware_name[49];
+       int rc;
+
+       /* Already have a firmware, free it */
+       free_firmware(bootrom);
+
+       /* Bootrom protocol is only supported for loading Stage 2 firmware */
+       if (stage != 2) {
+               dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
+                       stage);
+               return -EINVAL;
+       }
+
+       /*
+        * Create firmware name
+        *
+        * XXX Name it properly..
+        */
+       snprintf(firmware_name, sizeof(firmware_name),
+                FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
+                intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+                intf->vendor_id, intf->product_id);
+
+       // FIXME:
+       // Turn to dev_dbg later after everyone has valid bootloaders with good
+       // ids, but leave this as dev_info for now to make it easier to track
+       // down "empty" vid/pid modules.
+       dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
+                firmware_name);
+
+       rc = request_firmware(&bootrom->fw, firmware_name,
+               &connection->bundle->dev);
+       if (rc) {
+               dev_err(&connection->bundle->dev,
+                       "failed to find %s firmware (%d)\n", firmware_name, rc);
+       }
+
+       return rc;
+}
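As an illustration only (the IDs are made up, and FW_NAME_PREFIX is whatever firmware.h defines), an interface reporting ddbl1 manufacturer/product IDs 0x00000126/0x00001000 and VID/PID 0x00000000/0x00000001 would request a blob named:

    <FW_NAME_PREFIX>00000126_00001000_00000000_00000001_s2l.tftf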
+
+static int gb_bootrom_firmware_size_request(struct gb_operation *op)
+{
+       struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+       struct gb_bootrom_firmware_size_request *size_request = op->request->payload;
+       struct gb_bootrom_firmware_size_response *size_response;
+       struct device *dev = &op->connection->bundle->dev;
+       int ret;
+
+       /* Disable timeouts */
+       gb_bootrom_cancel_timeout(bootrom);
+
+       if (op->request->payload_size != sizeof(*size_request)) {
+               dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
+                       __func__, op->request->payload_size,
+                       sizeof(*size_request));
+               ret = -EINVAL;
+               goto queue_work;
+       }
+
+       mutex_lock(&bootrom->mutex);
+
+       ret = find_firmware(bootrom, size_request->stage);
+       if (ret)
+               goto unlock;
+
+       if (!gb_operation_response_alloc(op, sizeof(*size_response),
+                                        GFP_KERNEL)) {
+               dev_err(dev, "%s: error allocating response\n", __func__);
+               free_firmware(bootrom);
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       size_response = op->response->payload;
+       size_response->size = cpu_to_le32(bootrom->fw->size);
+
+       dev_dbg(dev, "%s: firmware size %zu bytes\n", __func__, bootrom->fw->size);
+
+unlock:
+       mutex_unlock(&bootrom->mutex);
+
+queue_work:
+       if (!ret) {
+               /* Refresh timeout */
+               gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
+                                      NEXT_REQ_TIMEOUT_MS);
+       }
+
+       return ret;
+}
+
+static int gb_bootrom_get_firmware(struct gb_operation *op)
+{
+       struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+       const struct firmware *fw;
+       struct gb_bootrom_get_firmware_request *firmware_request;
+       struct gb_bootrom_get_firmware_response *firmware_response;
+       struct device *dev = &op->connection->bundle->dev;
+       unsigned int offset, size;
+       enum next_request_type next_request;
+       int ret = 0;
+
+       /* Disable timeouts */
+       gb_bootrom_cancel_timeout(bootrom);
+
+       if (op->request->payload_size != sizeof(*firmware_request)) {
+               dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
+                       __func__, op->request->payload_size,
+                       sizeof(*firmware_request));
+               ret = -EINVAL;
+               goto queue_work;
+       }
+
+       mutex_lock(&bootrom->mutex);
+
+       fw = bootrom->fw;
+       if (!fw) {
+               dev_err(dev, "%s: firmware not available\n", __func__);
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       firmware_request = op->request->payload;
+       offset = le32_to_cpu(firmware_request->offset);
+       size = le32_to_cpu(firmware_request->size);
+
+       if (offset >= fw->size || size > fw->size - offset) {
+               dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
+                               offset, size);
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
+                                        GFP_KERNEL)) {
+               dev_err(dev, "%s: error allocating response\n", __func__);
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       firmware_response = op->response->payload;
+       memcpy(firmware_response->data, fw->data + offset, size);
+
+       dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n", offset,
+               size);
+
+unlock:
+       mutex_unlock(&bootrom->mutex);
+
+queue_work:
+       /* Refresh timeout */
+       if (!ret && (offset + size == fw->size))
+               next_request = NEXT_REQ_READY_TO_BOOT;
+       else
+               next_request = NEXT_REQ_GET_FIRMWARE;
+
+       gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);
+
+       return ret;
+}
+
+static int gb_bootrom_ready_to_boot(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_bootrom *bootrom = gb_connection_get_data(connection);
+       struct gb_bootrom_ready_to_boot_request *rtb_request;
+       struct device *dev = &connection->bundle->dev;
+       u8 status;
+       int ret = 0;
+
+       /* Disable timeouts */
+       gb_bootrom_cancel_timeout(bootrom);
+
+       if (op->request->payload_size != sizeof(*rtb_request)) {
+               dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
+                       __func__, op->request->payload_size,
+                       sizeof(*rtb_request));
+               ret = -EINVAL;
+               goto queue_work;
+       }
+
+       rtb_request = op->request->payload;
+       status = rtb_request->status;
+
+       /* Return error if the blob was invalid */
+       if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
+               ret = -EINVAL;
+               goto queue_work;
+       }
+
+       /*
+        * XXX Should we return error for insecure firmware?
+        */
+       dev_dbg(dev, "ready to boot: 0x%x, 0\n", status);
+
+queue_work:
+       /*
+        * Refresh the timeout: the Interface shall load the new personality
+        * and send a new hotplug request, which shall get rid of the bootrom
+        * connection. As that can take some time, increase the timeout a bit.
+        */
+       gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
+                              MODE_SWITCH_TIMEOUT_MS);
+
+       return ret;
+}
+
+static int gb_bootrom_request_handler(struct gb_operation *op)
+{
+       u8 type = op->type;
+
+       switch (type) {
+       case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
+               return gb_bootrom_firmware_size_request(op);
+       case GB_BOOTROM_TYPE_GET_FIRMWARE:
+               return gb_bootrom_get_firmware(op);
+       case GB_BOOTROM_TYPE_READY_TO_BOOT:
+               return gb_bootrom_ready_to_boot(op);
+       default:
+               dev_err(&op->connection->bundle->dev,
+                       "unsupported request: %u\n", type);
+               return -EINVAL;
+       }
+}
+
+static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
+{
+       struct gb_bundle *bundle = bootrom->connection->bundle;
+       struct gb_bootrom_version_request request;
+       struct gb_bootrom_version_response response;
+       int ret;
+
+       request.major = GB_BOOTROM_VERSION_MAJOR;
+       request.minor = GB_BOOTROM_VERSION_MINOR;
+
+       ret = gb_operation_sync(bootrom->connection,
+                               GB_BOOTROM_TYPE_VERSION,
+                               &request, sizeof(request), &response,
+                               sizeof(response));
+       if (ret) {
+               dev_err(&bundle->dev,
+                               "failed to get protocol version: %d\n",
+                               ret);
+               return ret;
+       }
+
+       if (response.major > request.major) {
+               dev_err(&bundle->dev,
+                               "unsupported major protocol version (%u > %u)\n",
+                               response.major, request.major);
+               return -ENOTSUPP;
+       }
+
+       bootrom->protocol_major = response.major;
+       bootrom->protocol_minor = response.minor;
+
+       dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
+                       response.minor);
+
+       return 0;
+}
+
+static int gb_bootrom_probe(struct gb_bundle *bundle,
+                                       const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_bootrom *bootrom;
+       int ret;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
+               return -ENODEV;
+
+       bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
+       if (!bootrom)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle,
+                                               le16_to_cpu(cport_desc->id),
+                                               gb_bootrom_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto err_free_bootrom;
+       }
+
+       gb_connection_set_data(connection, bootrom);
+
+       bootrom->connection = connection;
+
+       mutex_init(&bootrom->mutex);
+       INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
+       greybus_set_drvdata(bundle, bootrom);
+
+       ret = gb_connection_enable_tx(connection);
+       if (ret)
+               goto err_connection_destroy;
+
+       ret = gb_bootrom_get_version(bootrom);
+       if (ret)
+               goto err_connection_disable;
+
+       bootrom_es2_fixup_vid_pid(bootrom);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto err_connection_disable;
+
+       /* Refresh timeout */
+       gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
+                              NEXT_REQ_TIMEOUT_MS);
+
+       /* Tell bootrom we're ready. */
+       ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
+                               NULL, 0);
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                               "failed to send AP READY: %d\n", ret);
+               goto err_cancel_timeout;
+       }
+
+       dev_dbg(&bundle->dev, "AP_READY sent\n");
+
+       return 0;
+
+err_cancel_timeout:
+       gb_bootrom_cancel_timeout(bootrom);
+err_connection_disable:
+       gb_connection_disable(connection);
+err_connection_destroy:
+       gb_connection_destroy(connection);
+err_free_bootrom:
+       kfree(bootrom);
+
+       return ret;
+}
+
+static void gb_bootrom_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);
+
+       dev_dbg(&bundle->dev, "%s\n", __func__);
+
+       gb_connection_disable(bootrom->connection);
+
+       /* Disable timeouts */
+       gb_bootrom_cancel_timeout(bootrom);
+
+       /*
+        * Release firmware:
+        *
+        * As the connection and the delayed work are already disabled, we don't
+        * need to lock access to bootrom->fw here.
+        */
+       free_firmware(bootrom);
+
+       gb_connection_destroy(bootrom->connection);
+       kfree(bootrom);
+}
+
+static const struct greybus_bundle_id gb_bootrom_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
+       { }
+};
+
+static struct greybus_driver gb_bootrom_driver = {
+       .name           = "bootrom",
+       .probe          = gb_bootrom_probe,
+       .disconnect     = gb_bootrom_disconnect,
+       .id_table       = gb_bootrom_id_table,
+};
+
+module_greybus_driver(gb_bootrom_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
new file mode 100644 (file)
index 0000000..5bd7731
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+static ssize_t bundle_class_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       return sprintf(buf, "0x%02x\n", bundle->class);
+}
+static DEVICE_ATTR_RO(bundle_class);
+
+static ssize_t bundle_id_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       return sprintf(buf, "%u\n", bundle->id);
+}
+static DEVICE_ATTR_RO(bundle_id);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       if (bundle->state == NULL)
+               return sprintf(buf, "\n");
+
+       return sprintf(buf, "%s\n", bundle->state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       kfree(bundle->state);
+       bundle->state = kstrdup(buf, GFP_KERNEL);
+       if (!bundle->state)
+               return -ENOMEM;
+
+       /* Tell userspace that the file contents changed */
+       sysfs_notify(&bundle->dev.kobj, NULL, "state");
+
+       return size;
+}
+static DEVICE_ATTR_RW(state);
+
+static struct attribute *bundle_attrs[] = {
+       &dev_attr_bundle_class.attr,
+       &dev_attr_bundle_id.attr,
+       &dev_attr_state.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(bundle);
+
+static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
+                                                       u8 bundle_id)
+{
+       struct gb_bundle *bundle;
+
+       list_for_each_entry(bundle, &intf->bundles, links) {
+               if (bundle->id == bundle_id)
+                       return bundle;
+       }
+
+       return NULL;
+}
+
+static void gb_bundle_release(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       trace_gb_bundle_release(bundle);
+
+       kfree(bundle->state);
+       kfree(bundle->cport_desc);
+       kfree(bundle);
+}
+
+#ifdef CONFIG_PM
+
+static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &bundle->connections, bundle_links)
+               gb_connection_disable(connection);
+}
+
+static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &bundle->connections, bundle_links)
+               gb_connection_enable(connection);
+}
+
+static int gb_bundle_suspend(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct dev_pm_ops *pm = dev->driver->pm;
+       int ret;
+
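+       /*
+        * Prefer the bundle driver's own runtime_suspend callback; fall back
+        * to disabling all of the bundle's connections before asking the
+        * interface to suspend the bundle.
+        */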
+       if (pm && pm->runtime_suspend) {
+               ret = pm->runtime_suspend(&bundle->dev);
+               if (ret)
+                       return ret;
+       } else {
+               gb_bundle_disable_all_connections(bundle);
+       }
+
+       ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
+       if (ret) {
+               if (pm && pm->runtime_resume)
+                       ret = pm->runtime_resume(dev);
+               else
+                       gb_bundle_enable_all_connections(bundle);
+
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_bundle_resume(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct dev_pm_ops *pm = dev->driver->pm;
+       int ret;
+
+       ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
+       if (ret)
+               return ret;
+
+       if (pm && pm->runtime_resume) {
+               ret = pm->runtime_resume(dev);
+               if (ret)
+                       return ret;
+       } else {
+               gb_bundle_enable_all_connections(bundle);
+       }
+
+       return 0;
+}
+
+static int gb_bundle_idle(struct device *dev)
+{
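+       /* Defer idling to autosuspend so the configured delay is honoured. */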
+       pm_runtime_mark_last_busy(dev);
+       pm_request_autosuspend(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_bundle_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
+};
+
+struct device_type greybus_bundle_type = {
+       .name =         "greybus_bundle",
+       .release =      gb_bundle_release,
+       .pm =           &gb_bundle_pm_ops,
+};
+
+/*
+ * Create a gb_bundle structure to represent a discovered
+ * bundle.  Returns a pointer to the new bundle or a null
+ * pointer if a failure occurs due to memory exhaustion.
+ */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+                                  u8 class)
+{
+       struct gb_bundle *bundle;
+
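+       /* BUNDLE_ID_NONE is reserved as a sentinel value. */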
+       if (bundle_id == BUNDLE_ID_NONE) {
+               dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
+               return NULL;
+       }
+
+       /*
+        * Reject any attempt to reuse a bundle id.  We initialize
+        * these serially, so there's no need to worry about keeping
+        * the interface bundle list locked here.
+        */
+       if (gb_bundle_find(intf, bundle_id)) {
+               dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
+               return NULL;
+       }
+
+       bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+       if (!bundle)
+               return NULL;
+
+       bundle->intf = intf;
+       bundle->id = bundle_id;
+       bundle->class = class;
+       INIT_LIST_HEAD(&bundle->connections);
+
+       bundle->dev.parent = &intf->dev;
+       bundle->dev.bus = &greybus_bus_type;
+       bundle->dev.type = &greybus_bundle_type;
+       bundle->dev.groups = bundle_groups;
+       bundle->dev.dma_mask = intf->dev.dma_mask;
+       device_initialize(&bundle->dev);
+       dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
+
+       list_add(&bundle->links, &intf->bundles);
+
+       trace_gb_bundle_create(bundle);
+
+       return bundle;
+}
+
+int gb_bundle_add(struct gb_bundle *bundle)
+{
+       int ret;
+
+       ret = device_add(&bundle->dev);
+       if (ret) {
+               dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_bundle_add(bundle);
+
+       return 0;
+}
+
+/*
+ * Tear down a previously set up bundle.
+ */
+void gb_bundle_destroy(struct gb_bundle *bundle)
+{
+       trace_gb_bundle_destroy(bundle);
+
+       if (device_is_registered(&bundle->dev))
+               device_del(&bundle->dev);
+
+       list_del(&bundle->links);
+
+       put_device(&bundle->dev);
+}
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
new file mode 100644 (file)
index 0000000..349845e
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __BUNDLE_H
+#define __BUNDLE_H
+
+#include <linux/list.h>
+
+#define        BUNDLE_ID_NONE  U8_MAX
+
+/* Greybus "public" definitions */
+struct gb_bundle {
+       struct device           dev;
+       struct gb_interface     *intf;
+
+       u8                      id;
+       u8                      class;
+       u8                      class_major;
+       u8                      class_minor;
+
+       size_t                  num_cports;
+       struct greybus_descriptor_cport *cport_desc;
+
+       struct list_head        connections;
+       u8                      *state;
+
+       struct list_head        links;  /* interface->bundles */
+};
+#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
+
+/* Greybus "private" definitions */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+                                  u8 class);
+int gb_bundle_add(struct gb_bundle *bundle);
+void gb_bundle_destroy(struct gb_bundle *bundle);
+
+/* Bundle Runtime PM wrappers */
+#ifdef CONFIG_PM
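+/*
+ * Like pm_runtime_get_sync(), but drops the usage count again on failure so
+ * callers never need to balance a failed get.
+ */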
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{
+       int retval;
+
+       retval = pm_runtime_get_sync(&bundle->dev);
+       if (retval < 0) {
+               dev_err(&bundle->dev,
+                       "pm_runtime_get_sync failed: %d\n", retval);
+               pm_runtime_put_noidle(&bundle->dev);
+               return retval;
+       }
+
+       return 0;
+}
+
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{
+       int retval;
+
+       pm_runtime_mark_last_busy(&bundle->dev);
+       retval = pm_runtime_put_autosuspend(&bundle->dev);
+
+       return retval;
+}
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
+{
+       pm_runtime_get_noresume(&bundle->dev);
+}
+
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
+{
+       pm_runtime_put_noidle(&bundle->dev);
+}
+
+#else
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{ return 0; }
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{ return 0; }
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
+#endif
+
+#endif /* __BUNDLE_H */
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
new file mode 100644 (file)
index 0000000..e1f3046
--- /dev/null
@@ -0,0 +1,1400 @@
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "gb-camera.h"
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+enum gb_camera_debugs_buffer_id {
+       GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+       GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+       GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+       GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+       GB_CAMERA_DEBUGFS_BUFFER_MAX,
+};
+
+struct gb_camera_debugfs_buffer {
+       char data[PAGE_SIZE];
+       size_t length;
+};
+
+enum gb_camera_state {
+       GB_CAMERA_STATE_UNCONFIGURED,
+       GB_CAMERA_STATE_CONFIGURED,
+};
+
+/**
+ * struct gb_camera - A Greybus Camera Device
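+ * @bundle: the greybus bundle this camera is part of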
+ * @connection: the greybus connection for camera management
+ * @data_connection: the greybus connection for camera data
+ * @data_cport_id: the data CPort ID on the module side
+ * @mutex: protects the connection and state fields
+ * @state: the current module state
+ * @debugfs: debugfs entries for camera protocol operations testing
+ * @module: Greybus camera module registered to HOST processor.
+ */
+struct gb_camera {
+       struct gb_bundle *bundle;
+       struct gb_connection *connection;
+       struct gb_connection *data_connection;
+       u16 data_cport_id;
+
+       struct mutex mutex;
+       enum gb_camera_state state;
+
+       struct {
+               struct dentry *root;
+               struct gb_camera_debugfs_buffer *buffers;
+       } debugfs;
+
+       struct gb_camera_module module;
+};
+
+struct gb_camera_stream_config {
+       unsigned int width;
+       unsigned int height;
+       unsigned int format;
+       unsigned int vc;
+       unsigned int dt[2];
+       unsigned int max_size;
+};
+
+struct gb_camera_fmt_info {
+       enum v4l2_mbus_pixelcode mbus_code;
+       unsigned int gb_format;
+       unsigned int bpp;
+};
+
+/* GB format to media code map */
+static const struct gb_camera_fmt_info gb_fmt_info[] = {
+       {
+               .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+               .gb_format = 0x01,
+               .bpp       = 16,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_NV12_1x8,
+               .gb_format = 0x12,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_NV21_1x8,
+               .gb_format = 0x13,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_YU12_1x8,
+               .gb_format = 0x16,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_YV12_1x8,
+               .gb_format = 0x17,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+               .gb_format = 0x40,
+               .bpp       = 0,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_GB_CAM_METADATA_1X8,
+               .gb_format = 0x41,
+               .bpp       = 0,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_GB_CAM_DEBUG_DATA_1X8,
+               .gb_format = 0x42,
+               .bpp       = 0,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SBGGR10_1X10,
+               .gb_format = 0x80,
+               .bpp       = 10,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SGBRG10_1X10,
+               .gb_format = 0x81,
+               .bpp       = 10,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+               .gb_format = 0x82,
+               .bpp       = 10,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SRGGB10_1X10,
+               .gb_format = 0x83,
+               .bpp       = 10,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SBGGR12_1X12,
+               .gb_format = 0x84,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SGBRG12_1X12,
+               .gb_format = 0x85,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+               .gb_format = 0x86,
+               .bpp       = 12,
+       },
+       {
+               .mbus_code = V4L2_MBUS_FMT_SRGGB12_1X12,
+               .gb_format = 0x87,
+               .bpp       = 12,
+       },
+};
+
+static const struct gb_camera_fmt_info *gb_camera_get_format_info(u16 gb_fmt)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+               if (gb_fmt_info[i].gb_format == gb_fmt)
+                       return &gb_fmt_info[i];
+       }
+
+       return NULL;
+}
+
+#define ES2_APB_CDSI0_CPORT            16
+#define ES2_APB_CDSI1_CPORT            17
+
+#define GB_CAMERA_MAX_SETTINGS_SIZE    8192
+
+#define gcam_dbg(gcam, format...)      dev_dbg(&gcam->bundle->dev, format)
+#define gcam_info(gcam, format...)     dev_info(&gcam->bundle->dev, format)
+#define gcam_err(gcam, format...)      dev_err(&gcam->bundle->dev, format)
+
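+/*
+ * Like gb_operation_sync(), but takes operation flags and reports the actual
+ * response payload size back through @response_size.
+ */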
+static int gb_camera_operation_sync_flags(struct gb_connection *connection,
+                                         int type, unsigned int flags,
+                                         void *request, size_t request_size,
+                                         void *response, size_t *response_size)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_flags(connection, type, request_size,
+                                             *response_size, flags,
+                                             GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: synchronous operation of type 0x%02x failed: %d\n",
+                       connection->name, type, ret);
+       } else {
+               *response_size = operation->response->payload_size;
+
+               if (operation->response->payload_size)
+                       memcpy(response, operation->response->payload,
+                              operation->response->payload_size);
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
+               struct gb_camera_configure_streams_response *resp)
+{
+       unsigned int max_pkt_size = 0;
+       unsigned int i;
+
+       for (i = 0; i < resp->num_streams; i++) {
+               struct gb_camera_stream_config_response *cfg = &resp->config[i];
+               const struct gb_camera_fmt_info *fmt_info;
+               unsigned int pkt_size;
+
+               fmt_info = gb_camera_get_format_info(cfg->format);
+               if (!fmt_info) {
+                       gcam_err(gcam, "unsupported greybus image format: %d\n",
+                                cfg->format);
+                       return -EIO;
+               }
+
+               if (fmt_info->bpp == 0) {
+                       pkt_size = le32_to_cpu(cfg->max_pkt_size);
+
+                       if (pkt_size == 0) {
+                               gcam_err(gcam,
+                                        "Stream %u: invalid zero maximum packet size\n",
+                                        i);
+                               return -EIO;
+                       }
+               } else {
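+                       /*
+                        * Fixed-bpp formats: the module must report a maximum
+                        * packet size matching one line of image data.
+                        */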
+                       pkt_size = le16_to_cpu(cfg->width) * fmt_info->bpp / 8;
+
+                       if (pkt_size != le32_to_cpu(cfg->max_pkt_size)) {
+                               gcam_err(gcam,
+                                        "Stream %u: maximum packet size mismatch (%u/%u)\n",
+                                        i, pkt_size,
+                                        le32_to_cpu(cfg->max_pkt_size));
+                               return -EIO;
+                       }
+               }
+
+               max_pkt_size = max(pkt_size, max_pkt_size);
+       }
+
+       return max_pkt_size;
+}
+
+/*
+ * Validate the stream configuration response verifying padding is correctly
+ * set and the returned number of streams is supported
+ */
+static int gb_camera_configure_streams_validate_response(
+               struct gb_camera *gcam,
+               struct gb_camera_configure_streams_response *resp,
+               unsigned int nstreams)
+{
+       unsigned int i;
+
+       /* Validate the returned response structure */
+       if (resp->padding[0] || resp->padding[1]) {
+               gcam_err(gcam, "response padding != 0\n");
+               return -EIO;
+       }
+
+       if (resp->num_streams > nstreams) {
+               gcam_err(gcam, "got #streams %u > request %u\n",
+                        resp->num_streams, nstreams);
+               return -EIO;
+       }
+
+       for (i = 0; i < resp->num_streams; i++) {
+               struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
+               if (cfg->padding) {
+                       gcam_err(gcam, "stream #%u padding != 0\n", i);
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+static int gb_camera_set_intf_power_mode(struct gb_camera *gcam, u8 intf_id,
+                                        bool hs)
+{
+       struct gb_svc *svc = gcam->connection->hd->svc;
+       int ret;
+
+       if (hs)
+               ret = gb_svc_intf_set_power_mode(svc, intf_id,
+                                                GB_SVC_UNIPRO_HS_SERIES_A,
+                                                GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+                                                GB_SVC_SMALL_AMPLITUDE,
+                                                GB_SVC_NO_DE_EMPHASIS,
+                                                GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+                                                GB_SVC_PWRM_RXTERMINATION |
+                                                GB_SVC_PWRM_TXTERMINATION, 0,
+                                                NULL, NULL);
+       else
+               ret = gb_svc_intf_set_power_mode(svc, intf_id,
+                                                GB_SVC_UNIPRO_HS_SERIES_A,
+                                                GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                                2, 1,
+                                                GB_SVC_SMALL_AMPLITUDE,
+                                                GB_SVC_NO_DE_EMPHASIS,
+                                                GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                                2, 1,
+                                                0, 0,
+                                                NULL, NULL);
+
+       return ret;
+}
+
+static int gb_camera_set_power_mode(struct gb_camera *gcam, bool hs)
+{
+       struct gb_interface *intf = gcam->connection->intf;
+       struct gb_svc *svc = gcam->connection->hd->svc;
+       int ret;
+
+       ret = gb_camera_set_intf_power_mode(gcam, intf->interface_id, hs);
+       if (ret < 0) {
+               gcam_err(gcam, "failed to set module interface to %s (%d)\n",
+                        hs ? "HS" : "PWM", ret);
+               return ret;
+       }
+
+       ret = gb_camera_set_intf_power_mode(gcam, svc->ap_intf_id, hs);
+       if (ret < 0) {
+               gb_camera_set_intf_power_mode(gcam, intf->interface_id, !hs);
+               gcam_err(gcam, "failed to set AP interface to %s (%d)\n",
+                        hs ? "HS" : "PWM", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+struct ap_csi_config_request {
+       __u8 csi_id;
+       __u8 flags;
+#define GB_CAMERA_CSI_FLAG_CLOCK_CONTINUOUS 0x01
+       __u8 num_lanes;
+       __u8 padding;
+       __le32 csi_clk_freq;
+       __le32 max_pkt_size;
+} __packed;
+
+/*
+ * TODO: Compute the number of lanes dynamically based on bandwidth
+ * requirements.
+ */
+#define GB_CAMERA_CSI_NUM_DATA_LANES           4
+
+#define GB_CAMERA_CSI_CLK_FREQ_MAX             999000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MIN             100000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MARGIN          150000000U
+
+static int gb_camera_setup_data_connection(struct gb_camera *gcam,
+               struct gb_camera_configure_streams_response *resp,
+               struct gb_camera_csi_params *csi_params)
+{
+       struct ap_csi_config_request csi_cfg;
+       struct gb_connection *conn;
+       unsigned int clk_freq;
+       int ret;
+
+       /*
+        * Create the data connection between the camera module data CPort and
+        * APB CDSI1. The CDSI1 CPort ID is hardcoded by the ES2 bridge.
+        */
+       conn = gb_connection_create_offloaded(gcam->bundle, gcam->data_cport_id,
+                                             GB_CONNECTION_FLAG_NO_FLOWCTRL |
+                                             GB_CONNECTION_FLAG_CDSI1);
+       if (IS_ERR(conn))
+               return PTR_ERR(conn);
+
+       gcam->data_connection = conn;
+       gb_connection_set_data(conn, gcam);
+
+       ret = gb_connection_enable(conn);
+       if (ret)
+               goto error_conn_destroy;
+
+       /* Set the UniPro link to high speed mode. */
+       ret = gb_camera_set_power_mode(gcam, true);
+       if (ret < 0)
+               goto error_conn_disable;
+
+       /*
+        * Configure the APB-A CSI-2 transmitter.
+        *
+        * Hardcode the number of lanes to 4 and compute the bus clock frequency
+        * based on the module bandwidth requirements with a safety margin.
+        */
+       memset(&csi_cfg, 0, sizeof(csi_cfg));
+       csi_cfg.csi_id = 1;
+       csi_cfg.flags = 0;
+       csi_cfg.num_lanes = GB_CAMERA_CSI_NUM_DATA_LANES;
+
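+       /*
+        * The CSI-2 D-PHY transfers two bits per lane per clock cycle, so the
+        * required clock is the total data rate divided by two and by the
+        * number of lanes, plus a safety margin.
+        */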
+       clk_freq = resp->data_rate / 2 / GB_CAMERA_CSI_NUM_DATA_LANES;
+       clk_freq = clamp(clk_freq + GB_CAMERA_CSI_CLK_FREQ_MARGIN,
+                        GB_CAMERA_CSI_CLK_FREQ_MIN,
+                        GB_CAMERA_CSI_CLK_FREQ_MAX);
+       csi_cfg.csi_clk_freq = clk_freq;
+
+       ret = gb_camera_get_max_pkt_size(gcam, resp);
+       if (ret < 0) {
+               ret = -EIO;
+               goto error_power;
+       }
+       csi_cfg.max_pkt_size = ret;
+
+       ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+                          sizeof(csi_cfg),
+                          GB_APB_REQUEST_CSI_TX_CONTROL, false);
+       if (ret < 0) {
+               gcam_err(gcam, "failed to start the CSI transmitter\n");
+               goto error_power;
+       }
+
+       if (csi_params) {
+               csi_params->clk_freq = csi_cfg.csi_clk_freq;
+               csi_params->num_lanes = csi_cfg.num_lanes;
+       }
+
+       return 0;
+
+error_power:
+       gb_camera_set_power_mode(gcam, false);
+error_conn_disable:
+       gb_connection_disable(gcam->data_connection);
+error_conn_destroy:
+       gb_connection_destroy(gcam->data_connection);
+       gcam->data_connection = NULL;
+       return ret;
+}
+
+static void gb_camera_teardown_data_connection(struct gb_camera *gcam)
+{
+       struct ap_csi_config_request csi_cfg;
+       int ret;
+
+       /* Stop the APB1 CSI transmitter. */
+       memset(&csi_cfg, 0, sizeof(csi_cfg));
+       csi_cfg.csi_id = 1;
+
+       ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+                          sizeof(csi_cfg),
+                          GB_APB_REQUEST_CSI_TX_CONTROL, false);
+
+       if (ret < 0)
+               gcam_err(gcam, "failed to stop the CSI transmitter\n");
+
+       /* Set the UniPro link to low speed mode. */
+       gb_camera_set_power_mode(gcam, false);
+
+       /* Destroy the data connection. */
+       gb_connection_disable(gcam->data_connection);
+       gb_connection_destroy(gcam->data_connection);
+       gcam->data_connection = NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Protocol Operations
+ */
+
+static int gb_camera_capabilities(struct gb_camera *gcam,
+                                 u8 *capabilities, size_t *size)
+{
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(gcam->bundle);
+       if (ret)
+               return ret;
+
+       mutex_lock(&gcam->mutex);
+
+       if (!gcam->connection) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = gb_camera_operation_sync_flags(gcam->connection,
+                                            GB_CAMERA_TYPE_CAPABILITIES,
+                                            GB_OPERATION_FLAG_SHORT_RESPONSE,
+                                            NULL, 0,
+                                            (void *)capabilities, size);
+       if (ret)
+               gcam_err(gcam, "failed to retrieve capabilities: %d\n", ret);
+
+done:
+       mutex_unlock(&gcam->mutex);
+
+       gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+       return ret;
+}
+
+static int gb_camera_configure_streams(struct gb_camera *gcam,
+                                      unsigned int *num_streams,
+                                      unsigned int *flags,
+                                      struct gb_camera_stream_config *streams,
+                                      struct gb_camera_csi_params *csi_params)
+{
+       struct gb_camera_configure_streams_request *req;
+       struct gb_camera_configure_streams_response *resp;
+       unsigned int nstreams = *num_streams;
+       unsigned int i;
+       size_t req_size;
+       size_t resp_size;
+       int ret;
+
+       if (nstreams > GB_CAMERA_MAX_STREAMS)
+               return -EINVAL;
+
+       req_size = sizeof(*req) + nstreams * sizeof(req->config[0]);
+       resp_size = sizeof(*resp) + nstreams * sizeof(resp->config[0]);
+
+       req = kmalloc(req_size, GFP_KERNEL);
+       resp = kmalloc(resp_size, GFP_KERNEL);
+       if (!req || !resp) {
+               kfree(req);
+               kfree(resp);
+               return -ENOMEM;
+       }
+
+       req->num_streams = nstreams;
+       req->flags = *flags;
+       req->padding = 0;
+
+       for (i = 0; i < nstreams; ++i) {
+               struct gb_camera_stream_config_request *cfg = &req->config[i];
+
+               cfg->width = cpu_to_le16(streams[i].width);
+               cfg->height = cpu_to_le16(streams[i].height);
+               cfg->format = cpu_to_le16(streams[i].format);
+               cfg->padding = 0;
+       }
+
+       mutex_lock(&gcam->mutex);
+
+       ret = gb_pm_runtime_get_sync(gcam->bundle);
+       if (ret)
+               goto done_skip_pm_put;
+
+       if (!gcam->connection) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = gb_camera_operation_sync_flags(gcam->connection,
+                                            GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+                                            GB_OPERATION_FLAG_SHORT_RESPONSE,
+                                            req, req_size,
+                                            resp, &resp_size);
+       if (ret < 0)
+               goto done;
+
+       ret = gb_camera_configure_streams_validate_response(gcam, resp,
+                                                           nstreams);
+       if (ret < 0)
+               goto done;
+
+       *flags = resp->flags;
+       *num_streams = resp->num_streams;
+
+       for (i = 0; i < resp->num_streams; ++i) {
+               struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
+               streams[i].width = le16_to_cpu(cfg->width);
+               streams[i].height = le16_to_cpu(cfg->height);
+               streams[i].format = le16_to_cpu(cfg->format);
+               streams[i].vc = cfg->virtual_channel;
+               streams[i].dt[0] = cfg->data_type[0];
+               streams[i].dt[1] = cfg->data_type[1];
+               streams[i].max_size = le32_to_cpu(cfg->max_size);
+       }
+
+       if ((resp->flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED) ||
+           (req->flags & GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY))
+               goto done;
+
+       if (gcam->state == GB_CAMERA_STATE_CONFIGURED) {
+               gb_camera_teardown_data_connection(gcam);
+               gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+
+               /*
+                * When unconfiguring streams release the PM runtime reference
+                * that was acquired when streams were configured. The bundle
+                * won't be suspended until the PM runtime reference acquired at
+                * the beginning of this function gets released right before
+                * returning.
+                */
+               gb_pm_runtime_put_noidle(gcam->bundle);
+       }
+
+       if (resp->num_streams == 0)
+               goto done;
+
+       /*
+        * Make sure the bundle won't be suspended until streams get
+        * unconfigured after the stream is configured successfully
+        */
+       gb_pm_runtime_get_noresume(gcam->bundle);
+
+       /* Setup CSI-2 connection from APB-A to AP */
+       ret = gb_camera_setup_data_connection(gcam, resp, csi_params);
+       if (ret < 0) {
+               memset(req, 0, sizeof(*req));
+               gb_operation_sync(gcam->connection,
+                                 GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+                                 req, sizeof(*req),
+                                 resp, sizeof(*resp));
+               *flags = 0;
+               *num_streams = 0;
+               gb_pm_runtime_put_noidle(gcam->bundle);
+               goto done;
+       }
+
+       gcam->state = GB_CAMERA_STATE_CONFIGURED;
+
+done:
+       gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+done_skip_pm_put:
+       mutex_unlock(&gcam->mutex);
+       kfree(req);
+       kfree(resp);
+       return ret;
+}
+
+static int gb_camera_capture(struct gb_camera *gcam, u32 request_id,
+                            unsigned int streams, unsigned int num_frames,
+                            size_t settings_size, const void *settings)
+{
+       struct gb_camera_capture_request *req;
+       size_t req_size;
+       int ret;
+
+       if (settings_size > GB_CAMERA_MAX_SETTINGS_SIZE)
+               return -EINVAL;
+
+       req_size = sizeof(*req) + settings_size;
+       req = kmalloc(req_size, GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       req->request_id = cpu_to_le32(request_id);
+       req->streams = streams;
+       req->padding = 0;
+       req->num_frames = cpu_to_le16(num_frames);
+       memcpy(req->settings, settings, settings_size);
+
+       mutex_lock(&gcam->mutex);
+
+       if (!gcam->connection) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_CAPTURE,
+                               req, req_size, NULL, 0);
+done:
+       mutex_unlock(&gcam->mutex);
+
+       kfree(req);
+
+       return ret;
+}
+
+static int gb_camera_flush(struct gb_camera *gcam, u32 *request_id)
+{
+       struct gb_camera_flush_response resp;
+       int ret;
+
+       mutex_lock(&gcam->mutex);
+
+       if (!gcam->connection) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_FLUSH, NULL, 0,
+                               &resp, sizeof(resp));
+
+       if (ret < 0)
+               goto done;
+
+       if (request_id)
+               *request_id = le32_to_cpu(resp.request_id);
+
+done:
+       mutex_unlock(&gcam->mutex);
+
+       return ret;
+}
+
+static int gb_camera_request_handler(struct gb_operation *op)
+{
+       struct gb_camera *gcam = gb_connection_get_data(op->connection);
+       struct gb_camera_metadata_request *payload;
+       struct gb_message *request;
+
+       if (op->type != GB_CAMERA_TYPE_METADATA) {
+               gcam_err(gcam, "Unsupported unsolicited event: %u\n", op->type);
+               return -EINVAL;
+       }
+
+       request = op->request;
+
+       if (request->payload_size < sizeof(*payload)) {
+               gcam_err(gcam, "Wrong event size received (%zu < %zu)\n",
+                        request->payload_size, sizeof(*payload));
+               return -EINVAL;
+       }
+
+       payload = request->payload;
+
+       gcam_dbg(gcam, "received metadata for request %u, frame %u, stream %u\n",
+                payload->request_id, payload->frame_number, payload->stream);
+
+       return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interface with the host-side camera framework (gb-camera).
+ */
+static unsigned int gb_camera_mbus_to_gb(enum v4l2_mbus_pixelcode mbus_code)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+               if (gb_fmt_info[i].mbus_code == mbus_code)
+                       return gb_fmt_info[i].gb_format;
+       }
+       return gb_fmt_info[0].gb_format;
+}
+
+static enum v4l2_mbus_pixelcode gb_camera_gb_to_mbus(u16 gb_fmt)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+               if (gb_fmt_info[i].gb_format == gb_fmt)
+                       return gb_fmt_info[i].mbus_code;
+       }
+       return gb_fmt_info[0].mbus_code;
+}
+
+static ssize_t gb_camera_op_capabilities(void *priv, char *data, size_t len)
+{
+       struct gb_camera *gcam = priv;
+       size_t capabilities_len = len;
+       int ret;
+
+       ret = gb_camera_capabilities(gcam, data, &capabilities_len);
+       if (ret)
+               return ret;
+
+       return capabilities_len;
+}
+
+static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
+               unsigned int *flags, struct gb_camera_stream *streams,
+               struct gb_camera_csi_params *csi_params)
+{
+       struct gb_camera *gcam = priv;
+       struct gb_camera_stream_config *gb_streams;
+       unsigned int gb_flags = 0;
+       unsigned int gb_nstreams = *nstreams;
+       unsigned int i;
+       int ret;
+
+       if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
+               return -EINVAL;
+
+       gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
+       if (!gb_streams)
+               return -ENOMEM;
+
+       for (i = 0; i < gb_nstreams; i++) {
+               gb_streams[i].width = streams[i].width;
+               gb_streams[i].height = streams[i].height;
+               gb_streams[i].format =
+                       gb_camera_mbus_to_gb(streams[i].pixel_code);
+       }
+
+       if (*flags & GB_CAMERA_IN_FLAG_TEST)
+               gb_flags |= GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
+
+       ret = gb_camera_configure_streams(gcam, &gb_nstreams,
+                                         &gb_flags, gb_streams, csi_params);
+       if (ret < 0)
+               goto done;
+       if (gb_nstreams > *nstreams) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       *flags = 0;
+       if (gb_flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED)
+               *flags |= GB_CAMERA_OUT_FLAG_ADJUSTED;
+
+       for (i = 0; i < gb_nstreams; i++) {
+               streams[i].width = gb_streams[i].width;
+               streams[i].height = gb_streams[i].height;
+               streams[i].vc = gb_streams[i].vc;
+               streams[i].dt[0] = gb_streams[i].dt[0];
+               streams[i].dt[1] = gb_streams[i].dt[1];
+               streams[i].max_size = gb_streams[i].max_size;
+               streams[i].pixel_code =
+                       gb_camera_gb_to_mbus(gb_streams[i].format);
+       }
+       *nstreams = gb_nstreams;
+
+done:
+       kfree(gb_streams);
+       return ret;
+}
+
+static int gb_camera_op_capture(void *priv, u32 request_id,
+               unsigned int streams, unsigned int num_frames,
+               size_t settings_size, const void *settings)
+{
+       struct gb_camera *gcam = priv;
+
+       return gb_camera_capture(gcam, request_id, streams, num_frames,
+                                settings_size, settings);
+}
+
+static int gb_camera_op_flush(void *priv, u32 *request_id)
+{
+       struct gb_camera *gcam = priv;
+
+       return gb_camera_flush(gcam, request_id);
+}
+
+static const struct gb_camera_ops gb_cam_ops = {
+       .capabilities = gb_camera_op_capabilities,
+       .configure_streams = gb_camera_op_configure_streams,
+       .capture = gb_camera_op_capture,
+       .flush = gb_camera_op_flush,
+};
+
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
+static ssize_t gb_camera_debugfs_capabilities(struct gb_camera *gcam,
+               char *buf, size_t len)
+{
+       struct gb_camera_debugfs_buffer *buffer =
+               &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES];
+       size_t size = 1024;
+       unsigned int i;
+       u8 *caps;
+       int ret;
+
+       caps = kmalloc(size, GFP_KERNEL);
+       if (!caps)
+               return -ENOMEM;
+
+       ret = gb_camera_capabilities(gcam, caps, &size);
+       if (ret < 0)
+               goto done;
+
+       /*
+        * hex_dump_to_buffer() doesn't return the number of bytes dumped prior
+        * to v4.0, we need our own implementation :-(
+        */
+       buffer->length = 0;
+
+       for (i = 0; i < size; i += 16) {
+               unsigned int nbytes = min_t(unsigned int, size - i, 16);
+
+               buffer->length += sprintf(buffer->data + buffer->length,
+                                         "%*ph\n", nbytes, caps + i);
+       }
+
+done:
+       kfree(caps);
+       return ret;
+}
+
+static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
+               char *buf, size_t len)
+{
+       struct gb_camera_debugfs_buffer *buffer =
+               &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_STREAMS];
+       struct gb_camera_stream_config *streams;
+       unsigned int nstreams;
+       unsigned int flags;
+       unsigned int i;
+       char *token;
+       int ret;
+
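+       /*
+        * The written string is a semicolon-separated list of the form
+        * "<nstreams>;<flags>;" followed by "<width>;<height>;<format>;" per
+        * stream, with the format code given in hexadecimal.
+        */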
+       /* Retrieve number of streams to configure */
+       token = strsep(&buf, ";");
+       if (token == NULL)
+               return -EINVAL;
+
+       ret = kstrtouint(token, 10, &nstreams);
+       if (ret < 0)
+               return ret;
+
+       if (nstreams > GB_CAMERA_MAX_STREAMS)
+               return -EINVAL;
+
+       token = strsep(&buf, ";");
+       if (token == NULL)
+               return -EINVAL;
+
+       ret = kstrtouint(token, 10, &flags);
+       if (ret < 0)
+               return ret;
+
+       /* For each stream to configure parse width, height and format */
+       streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);
+       if (!streams)
+               return -ENOMEM;
+
+       for (i = 0; i < nstreams; ++i) {
+               struct gb_camera_stream_config *stream = &streams[i];
+
+               /* width */
+               token = strsep(&buf, ";");
+               if (token == NULL) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               ret = kstrtouint(token, 10, &stream->width);
+               if (ret < 0)
+                       goto done;
+
+               /* height */
+               token = strsep(&buf, ";");
+               if (token == NULL) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
+               ret = kstrtouint(token, 10, &stream->height);
+               if (ret < 0)
+                       goto done;
+
+               /* Image format code */
+               token = strsep(&buf, ";");
+               if (token == NULL) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
+               ret = kstrtouint(token, 16, &stream->format);
+               if (ret < 0)
+                       goto done;
+       }
+
+       ret = gb_camera_configure_streams(gcam, &nstreams, &flags, streams,
+                                         NULL);
+       if (ret < 0)
+               goto done;
+
+       buffer->length = sprintf(buffer->data, "%u;%u;", nstreams, flags);
+
+       for (i = 0; i < nstreams; ++i) {
+               struct gb_camera_stream_config *stream = &streams[i];
+
+               buffer->length += sprintf(buffer->data + buffer->length,
+                                         "%u;%u;%u;%u;%u;%u;%u;",
+                                         stream->width, stream->height,
+                                         stream->format, stream->vc,
+                                         stream->dt[0], stream->dt[1],
+                                         stream->max_size);
+       }
+
+       ret = len;
+
+done:
+       kfree(streams);
+       return ret;
+};
+
+static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
+               char *buf, size_t len)
+{
+       unsigned int request_id;
+       unsigned int streams_mask;
+       unsigned int num_frames;
+       char *token;
+       int ret;
+
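+       /* Expected input: "<request id>;<stream mask (hex)>;<num frames>;" */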
+       /* Request id */
+       token = strsep(&buf, ";");
+       if (token == NULL)
+               return -EINVAL;
+       ret = kstrtouint(token, 10, &request_id);
+       if (ret < 0)
+               return ret;
+
+       /* Stream mask */
+       token = strsep(&buf, ";");
+       if (token == NULL)
+               return -EINVAL;
+       ret = kstrtouint(token, 16, &streams_mask);
+       if (ret < 0)
+               return ret;
+
+       /* number of frames */
+       token = strsep(&buf, ";");
+       if (token == NULL)
+               return -EINVAL;
+       ret = kstrtouint(token, 10, &num_frames);
+       if (ret < 0)
+               return ret;
+
+       ret = gb_camera_capture(gcam, request_id, streams_mask, num_frames, 0,
+                               NULL);
+       if (ret < 0)
+               return ret;
+
+       return len;
+}
+
+static ssize_t gb_camera_debugfs_flush(struct gb_camera *gcam,
+               char *buf, size_t len)
+{
+       struct gb_camera_debugfs_buffer *buffer =
+               &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_FLUSH];
+       unsigned int req_id;
+       int ret;
+
+       ret = gb_camera_flush(gcam, &req_id);
+       if (ret < 0)
+               return ret;
+
+       buffer->length = sprintf(buffer->data, "%u", req_id);
+
+       return len;
+}
+
+struct gb_camera_debugfs_entry {
+       const char *name;
+       unsigned int mask;
+       unsigned int buffer;
+       ssize_t (*execute)(struct gb_camera *gcam, char *buf, size_t len);
+};
+
+static const struct gb_camera_debugfs_entry gb_camera_debugfs_entries[] = {
+       {
+               .name = "capabilities",
+               .mask = S_IFREG | S_IRUGO,
+               .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+               .execute = gb_camera_debugfs_capabilities,
+       }, {
+               .name = "configure_streams",
+               .mask = S_IFREG | S_IRUGO | S_IWUGO,
+               .buffer = GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+               .execute = gb_camera_debugfs_configure_streams,
+       }, {
+               .name = "capture",
+               .mask = S_IFREG | S_IRUGO | S_IWUGO,
+               .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+               .execute = gb_camera_debugfs_capture,
+       }, {
+               .name = "flush",
+               .mask = S_IFREG | S_IRUGO | S_IWUGO,
+               .buffer = GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+               .execute = gb_camera_debugfs_flush,
+       },
+};
+
+static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
+                                     size_t len, loff_t *offset)
+{
+       const struct gb_camera_debugfs_entry *op = file->private_data;
+       struct gb_camera *gcam = file->f_inode->i_private;
+       struct gb_camera_debugfs_buffer *buffer;
+       ssize_t ret;
+
+       /* For read-only entries the operation is triggered by a read. */
+       if (!(op->mask & S_IWUGO)) {
+               ret = op->execute(gcam, NULL, 0);
+               if (ret < 0)
+                       return ret;
+       }
+
+       buffer = &gcam->debugfs.buffers[op->buffer];
+
+       return simple_read_from_buffer(buf, len, offset, buffer->data,
+                                      buffer->length);
+}
+
+static ssize_t gb_camera_debugfs_write(struct file *file,
+                                      const char __user *buf, size_t len,
+                                      loff_t *offset)
+{
+       const struct gb_camera_debugfs_entry *op = file->private_data;
+       struct gb_camera *gcam = file->f_inode->i_private;
+       ssize_t ret;
+       char *kbuf;
+
+       if (len > 1024)
+               return -EINVAL;
+
+       kbuf = kmalloc(len + 1, GFP_KERNEL);
+       if (kbuf == NULL)
+               return -ENOMEM;
+
+       if (copy_from_user(kbuf, buf, len)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       kbuf[len] = '\0';
+
+       ret = op->execute(gcam, kbuf, len);
+
+done:
+       kfree(kbuf);
+       return ret;
+}
+
+static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
+               const struct gb_camera_debugfs_entry *entry =
+                       &gb_camera_debugfs_entries[i];
+
+               if (!strcmp(file->f_path.dentry->d_iname, entry->name)) {
+                       file->private_data = (void *)entry;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static const struct file_operations gb_camera_debugfs_ops = {
+       .open = gb_camera_debugfs_open,
+       .read = gb_camera_debugfs_read,
+       .write = gb_camera_debugfs_write,
+};
+
+static int gb_camera_debugfs_init(struct gb_camera *gcam)
+{
+       struct gb_connection *connection = gcam->connection;
+       char dirname[27];
+       unsigned int i;
+
+       /*
+        * Create root debugfs entry and a file entry for each camera operation.
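+        * The directory is named "camera-<interface_id>.<bundle_id>" and is
+        * created under the greybus debugfs root.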
+        */
+       snprintf(dirname, sizeof(dirname), "camera-%u.%u",
+                connection->intf->interface_id, gcam->bundle->id);
+
+       gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
+       if (IS_ERR(gcam->debugfs.root)) {
+               gcam_err(gcam, "debugfs root create failed (%ld)\n",
+                        PTR_ERR(gcam->debugfs.root));
+               return PTR_ERR(gcam->debugfs.root);
+       }
+
+       gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
+                                       GB_CAMERA_DEBUGFS_BUFFER_MAX);
+       if (!gcam->debugfs.buffers)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
+               const struct gb_camera_debugfs_entry *entry =
+                       &gb_camera_debugfs_entries[i];
+               struct dentry *dentry;
+
+               gcam->debugfs.buffers[i].length = 0;
+
+               dentry = debugfs_create_file(entry->name, entry->mask,
+                                            gcam->debugfs.root, gcam,
+                                            &gb_camera_debugfs_ops);
+               if (IS_ERR(dentry)) {
+                       gcam_err(gcam,
+                                "debugfs operation %s create failed (%ld)\n",
+                                entry->name, PTR_ERR(dentry));
+                       return PTR_ERR(dentry);
+               }
+       }
+
+       return 0;
+}
+
+static void gb_camera_debugfs_cleanup(struct gb_camera *gcam)
+{
+       debugfs_remove_recursive(gcam->debugfs.root);
+
+       vfree(gcam->debugfs.buffers);
+}
+
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+static void gb_camera_cleanup(struct gb_camera *gcam)
+{
+       gb_camera_debugfs_cleanup(gcam);
+
+       mutex_lock(&gcam->mutex);
+       if (gcam->data_connection) {
+               gb_connection_disable(gcam->data_connection);
+               gb_connection_destroy(gcam->data_connection);
+               gcam->data_connection = NULL;
+       }
+
+       if (gcam->connection) {
+               gb_connection_disable(gcam->connection);
+               gb_connection_destroy(gcam->connection);
+               gcam->connection = NULL;
+       }
+       mutex_unlock(&gcam->mutex);
+}
+
+static void gb_camera_release_module(struct kref *ref)
+{
+       struct gb_camera_module *cam_mod =
+               container_of(ref, struct gb_camera_module, refcount);
+       kfree(cam_mod->priv);
+}
+
+static int gb_camera_probe(struct gb_bundle *bundle,
+                          const struct greybus_bundle_id *id)
+{
+       struct gb_connection *conn;
+       struct gb_camera *gcam;
+       u16 mgmt_cport_id = 0;
+       u16 data_cport_id = 0;
+       unsigned int i;
+       int ret;
+
+       /*
+        * The camera bundle must contain exactly two CPorts, one for the
+        * camera management protocol and one for the camera data protocol.
+        */
+       if (bundle->num_cports != 2)
+               return -ENODEV;
+
+       for (i = 0; i < bundle->num_cports; ++i) {
+               struct greybus_descriptor_cport *desc = &bundle->cport_desc[i];
+
+               switch (desc->protocol_id) {
+               case GREYBUS_PROTOCOL_CAMERA_MGMT:
+                       mgmt_cport_id = le16_to_cpu(desc->id);
+                       break;
+               case GREYBUS_PROTOCOL_CAMERA_DATA:
+                       data_cport_id = le16_to_cpu(desc->id);
+                       break;
+               default:
+                       return -ENODEV;
+               }
+       }
+
+       if (!mgmt_cport_id || !data_cport_id)
+               return -ENODEV;
+
+       gcam = kzalloc(sizeof(*gcam), GFP_KERNEL);
+       if (!gcam)
+               return -ENOMEM;
+
+       mutex_init(&gcam->mutex);
+
+       gcam->bundle = bundle;
+       gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+       gcam->data_cport_id = data_cport_id;
+
+       conn = gb_connection_create(bundle, mgmt_cport_id,
+                                   gb_camera_request_handler);
+       if (IS_ERR(conn)) {
+               ret = PTR_ERR(conn);
+               goto error;
+       }
+
+       gcam->connection = conn;
+       gb_connection_set_data(conn, gcam);
+
+       ret = gb_connection_enable(conn);
+       if (ret)
+               goto error;
+
+       ret = gb_camera_debugfs_init(gcam);
+       if (ret < 0)
+               goto error;
+
+       gcam->module.priv = gcam;
+       gcam->module.ops = &gb_cam_ops;
+       gcam->module.interface_id = gcam->connection->intf->interface_id;
+       gcam->module.release = gb_camera_release_module;
+       ret = gb_camera_register(&gcam->module);
+       if (ret < 0)
+               goto error;
+
+       greybus_set_drvdata(bundle, gcam);
+
+       gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+       return 0;
+
+error:
+       gb_camera_cleanup(gcam);
+       kfree(gcam);
+       return ret;
+}
+
+static void gb_camera_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_camera *gcam = greybus_get_drvdata(bundle);
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               gb_pm_runtime_get_noresume(bundle);
+
+       gb_camera_cleanup(gcam);
+       gb_camera_unregister(&gcam->module);
+}
+
+static const struct greybus_bundle_id gb_camera_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_CAMERA) },
+       { },
+};
+
+#ifdef CONFIG_PM
+static int gb_camera_suspend(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gb_camera *gcam = greybus_get_drvdata(bundle);
+
+       if (gcam->data_connection)
+               gb_connection_disable(gcam->data_connection);
+
+       gb_connection_disable(gcam->connection);
+
+       return 0;
+}
+
+static int gb_camera_resume(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gb_camera *gcam = greybus_get_drvdata(bundle);
+       int ret;
+
+       ret = gb_connection_enable(gcam->connection);
+       if (ret) {
+               gcam_err(gcam, "failed to enable connection: %d\n", ret);
+               return ret;
+       }
+
+       if (gcam->data_connection) {
+               ret = gb_connection_enable(gcam->data_connection);
+               if (ret) {
+                       gcam_err(gcam,
+                                "failed to enable data connection: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_camera_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_camera_suspend, gb_camera_resume, NULL)
+};
+
+static struct greybus_driver gb_camera_driver = {
+       .name           = "camera",
+       .probe          = gb_camera_probe,
+       .disconnect     = gb_camera_disconnect,
+       .id_table       = gb_camera_id_table,
+       .driver.pm      = &gb_camera_pm_ops,
+};
+
+module_greybus_driver(gb_camera_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
new file mode 100644 (file)
index 0000000..5570751
--- /dev/null
@@ -0,0 +1,938 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+
+#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT    1000
+
+
+static void gb_connection_kref_release(struct kref *kref);
+
+
+static DEFINE_SPINLOCK(gb_connections_lock);
+static DEFINE_MUTEX(gb_connection_mutex);
+
+
+/* Caller holds gb_connection_mutex. */
+static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
+{
+       struct gb_host_device *hd = intf->hd;
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &hd->connections, hd_links) {
+               if (connection->intf == intf &&
+                               connection->intf_cport_id == cport_id)
+                       return true;
+       }
+
+       return false;
+}
+
+static void gb_connection_get(struct gb_connection *connection)
+{
+       kref_get(&connection->kref);
+
+       trace_gb_connection_get(connection);
+}
+
+static void gb_connection_put(struct gb_connection *connection)
+{
+       trace_gb_connection_put(connection);
+
+       kref_put(&connection->kref, gb_connection_kref_release);
+}
+
+/*
+ * Returns a reference-counted pointer to the connection if found.
+ */
+static struct gb_connection *
+gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
+{
+       struct gb_connection *connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gb_connections_lock, flags);
+       list_for_each_entry(connection, &hd->connections, hd_links)
+               if (connection->hd_cport_id == cport_id) {
+                       gb_connection_get(connection);
+                       goto found;
+               }
+       connection = NULL;
+found:
+       spin_unlock_irqrestore(&gb_connections_lock, flags);
+
+       return connection;
+}
+
+/*
+ * Callback from the host driver to let us know that data has been
+ * received on the bundle.
+ */
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+                       u8 *data, size_t length)
+{
+       struct gb_connection *connection;
+
+       trace_gb_hd_in(hd);
+
+       connection = gb_connection_hd_find(hd, cport_id);
+       if (!connection) {
+               dev_err(&hd->dev,
+                       "nonexistent connection (%zu bytes dropped)\n", length);
+               return;
+       }
+       gb_connection_recv(connection, data, length);
+       gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(greybus_data_rcvd);
+
+static void gb_connection_kref_release(struct kref *kref)
+{
+       struct gb_connection *connection;
+
+       connection = container_of(kref, struct gb_connection, kref);
+
+       trace_gb_connection_release(connection);
+
+       kfree(connection);
+}
+
+static void gb_connection_init_name(struct gb_connection *connection)
+{
+       u16 hd_cport_id = connection->hd_cport_id;
+       u16 cport_id = 0;
+       u8 intf_id = 0;
+
+       if (connection->intf) {
+               intf_id = connection->intf->interface_id;
+               cport_id = connection->intf_cport_id;
+       }
+
+       snprintf(connection->name, sizeof(connection->name),
+                       "%u/%u:%u", hd_cport_id, intf_id, cport_id);
+}
+
+/*
+ * _gb_connection_create() - create a Greybus connection
+ * @hd:                        host device of the connection
+ * @hd_cport_id:       host-device cport id, or -1 for dynamic allocation
+ * @intf:              remote interface, or NULL for static connections
+ * @bundle:            remote-interface bundle (may be NULL)
+ * @cport_id:          remote-interface cport id, or 0 for static connections
+ * @handler:           request handler (may be NULL)
+ * @flags:             connection flags
+ *
+ * Create a Greybus connection, representing the bidirectional link
+ * between a CPort on a (local) Greybus host device and a CPort on
+ * another Greybus interface.
+ *
+ * A connection also maintains the state of operations sent over the
+ * connection.
+ *
+ * Serialised against concurrent create and destroy using the
+ * gb_connection_mutex.
+ *
+ * Return: A pointer to the new connection if successful, or an ERR_PTR
+ * otherwise.
+ */
+static struct gb_connection *
+_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
+                               struct gb_interface *intf,
+                               struct gb_bundle *bundle, int cport_id,
+                               gb_request_handler_t handler,
+                               unsigned long flags)
+{
+       struct gb_connection *connection;
+       int ret;
+
+       mutex_lock(&gb_connection_mutex);
+
+       if (intf && gb_connection_cport_in_use(intf, cport_id)) {
+               dev_err(&intf->dev, "cport %u already in use\n", cport_id);
+               ret = -EBUSY;
+               goto err_unlock;
+       }
+
+       ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
+       if (ret < 0) {
+               dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
+               goto err_unlock;
+       }
+       hd_cport_id = ret;
+
+       connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+       if (!connection) {
+               ret = -ENOMEM;
+               goto err_hd_cport_release;
+       }
+
+       connection->hd_cport_id = hd_cport_id;
+       connection->intf_cport_id = cport_id;
+       connection->hd = hd;
+       connection->intf = intf;
+       connection->bundle = bundle;
+       connection->handler = handler;
+       connection->flags = flags;
+       if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
+               connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+
+       atomic_set(&connection->op_cycle, 0);
+       mutex_init(&connection->mutex);
+       spin_lock_init(&connection->lock);
+       INIT_LIST_HEAD(&connection->operations);
+
+       connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
+                                        dev_name(&hd->dev), hd_cport_id);
+       if (!connection->wq) {
+               ret = -ENOMEM;
+               goto err_free_connection;
+       }
+
+       kref_init(&connection->kref);
+
+       gb_connection_init_name(connection);
+
+       spin_lock_irq(&gb_connections_lock);
+       list_add(&connection->hd_links, &hd->connections);
+
+       if (bundle)
+               list_add(&connection->bundle_links, &bundle->connections);
+       else
+               INIT_LIST_HEAD(&connection->bundle_links);
+
+       spin_unlock_irq(&gb_connections_lock);
+
+       mutex_unlock(&gb_connection_mutex);
+
+       trace_gb_connection_create(connection);
+
+       return connection;
+
+err_free_connection:
+       kfree(connection);
+err_hd_cport_release:
+       gb_hd_cport_release(hd, hd_cport_id);
+err_unlock:
+       mutex_unlock(&gb_connection_mutex);
+
+       return ERR_PTR(ret);
+}
+
+struct gb_connection *
+gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
+                                       gb_request_handler_t handler)
+{
+       return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
+                                       GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create_control(struct gb_interface *intf)
+{
+       return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
+                                       GB_CONNECTION_FLAG_CONTROL |
+                                       GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
+                                       gb_request_handler_t handler)
+{
+       struct gb_interface *intf = bundle->intf;
+
+       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+                                       handler, 0);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create);
+
+struct gb_connection *
+gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
+                                       gb_request_handler_t handler,
+                                       unsigned long flags)
+{
+       struct gb_interface *intf = bundle->intf;
+
+       if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
+               flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
+
+       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+                                       handler, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_flags);
+
+struct gb_connection *
+gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
+                                       unsigned long flags)
+{
+       flags |= GB_CONNECTION_FLAG_OFFLOADED;
+
+       return gb_connection_create_flags(bundle, cport_id, NULL, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
+
+static int gb_connection_hd_cport_enable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_enable)
+               return 0;
+
+       ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
+                                       connection->flags);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
+                               connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void gb_connection_hd_cport_disable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_disable)
+               return;
+
+       ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
+                               connection->name, ret);
+       }
+}
+
+static int gb_connection_hd_cport_connected(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_connected)
+               return 0;
+
+       ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
+                               connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_flush(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_flush)
+               return 0;
+
+       ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
+                               connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       size_t peer_space;
+       int ret;
+
+       peer_space = sizeof(struct gb_operation_msg_hdr) +
+                       sizeof(struct gb_cport_shutdown_request);
+
+       if (connection->mode_switch)
+               peer_space += sizeof(struct gb_operation_msg_hdr);
+
+       ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
+                                       peer_space,
+                                       GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
+                               connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_clear(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
+                               connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Request the SVC to create a connection from the AP's CPort to the
+ * interface's CPort.
+ */
+static int
+gb_connection_svc_connection_create(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       struct gb_interface *intf;
+       u8 cport_flags;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       intf = connection->intf;
+
+       /*
+        * Enable either E2EFC or CSD, unless no flow control is requested.
+        */
+       cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
+       if (gb_connection_flow_control_disabled(connection)) {
+               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
+       } else if (gb_connection_e2efc_enabled(connection)) {
+               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
+                               GB_SVC_CPORT_FLAG_E2EFC;
+       }
+
+       ret = gb_svc_connection_create(hd->svc,
+                       hd->svc->ap_intf_id,
+                       connection->hd_cport_id,
+                       intf->interface_id,
+                       connection->intf_cport_id,
+                       cport_flags);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to create svc connection: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void
+gb_connection_svc_connection_destroy(struct gb_connection *connection)
+{
+       if (gb_connection_is_static(connection))
+               return;
+
+       gb_svc_connection_destroy(connection->hd->svc,
+                                 connection->hd->svc->ap_intf_id,
+                                 connection->hd_cport_id,
+                                 connection->intf->interface_id,
+                                 connection->intf_cport_id);
+}
+
+/* Inform the interface that this CPort is now active */
+static int gb_connection_control_connected(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       if (gb_connection_is_control(connection))
+               return 0;
+
+       control = connection->intf->control;
+
+       ret = gb_control_connected_operation(control, cport_id);
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                       "failed to connect cport: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void
+gb_connection_control_disconnecting(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return;
+
+       control = connection->intf->control;
+
+       ret = gb_control_disconnecting_operation(control, cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                               "%s: failed to send disconnecting: %d\n",
+                               connection->name, ret);
+       }
+}
+
+static void
+gb_connection_control_disconnected(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return;
+
+       control = connection->intf->control;
+
+       if (gb_connection_is_control(connection)) {
+               if (connection->mode_switch) {
+                       ret = gb_control_mode_switch_operation(control);
+                       if (ret) {
+                               /*
+                                * Allow mode switch to time out waiting for
+                                * mailbox event.
+                                */
+                               return;
+                       }
+               }
+
+               return;
+       }
+
+       ret = gb_control_disconnected_operation(control, cport_id);
+       if (ret) {
+               dev_warn(&connection->bundle->dev,
+                        "failed to disconnect cport: %d\n", ret);
+       }
+}
+
+static int gb_connection_shutdown_operation(struct gb_connection *connection,
+                                               u8 phase)
+{
+       struct gb_cport_shutdown_request *req;
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(connection,
+                                               GB_REQUEST_TYPE_CPORT_SHUTDOWN,
+                                               sizeof(*req), 0, 0,
+                                               GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       req = operation->request->payload;
+       req->phase = phase;
+
+       ret = gb_operation_request_send_sync(operation);
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int gb_connection_cport_shutdown(struct gb_connection *connection,
+                                       u8 phase)
+{
+       struct gb_host_device *hd = connection->hd;
+       const struct gb_hd_driver *drv = hd->driver;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       if (gb_connection_is_offloaded(connection)) {
+               if (!drv->cport_shutdown)
+                       return 0;
+
+               ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
+                                               GB_OPERATION_TIMEOUT_DEFAULT);
+       } else {
+               ret = gb_connection_shutdown_operation(connection, phase);
+       }
+
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
+                               connection->name, phase, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
+{
+       return gb_connection_cport_shutdown(connection, 1);
+}
+
+static int
+gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
+{
+       return gb_connection_cport_shutdown(connection, 2);
+}
+
+/*
+ * Cancel all active operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to DISABLED or
+ * DISCONNECTING.
+ */
+static void gb_connection_cancel_operations(struct gb_connection *connection,
+                                               int errno)
+       __must_hold(&connection->lock)
+{
+       struct gb_operation *operation;
+
+       while (!list_empty(&connection->operations)) {
+               operation = list_last_entry(&connection->operations,
+                                               struct gb_operation, links);
+               gb_operation_get(operation);
+               spin_unlock_irq(&connection->lock);
+
+               if (gb_operation_is_incoming(operation))
+                       gb_operation_cancel_incoming(operation, errno);
+               else
+                       gb_operation_cancel(operation, errno);
+
+               gb_operation_put(operation);
+
+               spin_lock_irq(&connection->lock);
+       }
+}
+
+/*
+ * Cancel all active incoming operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to ENABLED_TX.
+ */
+static void
+gb_connection_flush_incoming_operations(struct gb_connection *connection,
+                                               int errno)
+       __must_hold(&connection->lock)
+{
+       struct gb_operation *operation;
+       bool incoming;
+
+       while (!list_empty(&connection->operations)) {
+               incoming = false;
+               list_for_each_entry(operation, &connection->operations,
+                                                               links) {
+                       if (gb_operation_is_incoming(operation)) {
+                               gb_operation_get(operation);
+                               incoming = true;
+                               break;
+                       }
+               }
+
+               if (!incoming)
+                       break;
+
+               spin_unlock_irq(&connection->lock);
+
+               /* FIXME: flush, not cancel? */
+               gb_operation_cancel_incoming(operation, errno);
+               gb_operation_put(operation);
+
+               spin_lock_irq(&connection->lock);
+       }
+}
+
+/*
+ * _gb_connection_enable() - enable a connection
+ * @connection:                connection to enable
+ * @rx:                        whether to enable incoming requests
+ *
+ * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
+ * ENABLED_TX->ENABLED state transitions.
+ *
+ * Locking: Caller holds connection->mutex.
+ */
+static int _gb_connection_enable(struct gb_connection *connection, bool rx)
+{
+       int ret;
+
+       /* Handle ENABLED_TX -> ENABLED transitions. */
+       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
+               if (!(connection->handler && rx))
+                       return 0;
+
+               spin_lock_irq(&connection->lock);
+               connection->state = GB_CONNECTION_STATE_ENABLED;
+               spin_unlock_irq(&connection->lock);
+
+               return 0;
+       }
+
+       ret = gb_connection_hd_cport_enable(connection);
+       if (ret)
+               return ret;
+
+       ret = gb_connection_svc_connection_create(connection);
+       if (ret)
+               goto err_hd_cport_clear;
+
+       ret = gb_connection_hd_cport_connected(connection);
+       if (ret)
+               goto err_svc_connection_destroy;
+
+       spin_lock_irq(&connection->lock);
+       if (connection->handler && rx)
+               connection->state = GB_CONNECTION_STATE_ENABLED;
+       else
+               connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+       spin_unlock_irq(&connection->lock);
+
+       ret = gb_connection_control_connected(connection);
+       if (ret)
+               goto err_control_disconnecting;
+
+       return 0;
+
+err_control_disconnecting:
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       /* Transmit queue should already be empty. */
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_control_disconnecting(connection);
+       gb_connection_cport_shutdown_phase_1(connection);
+       gb_connection_hd_cport_quiesce(connection);
+       gb_connection_cport_shutdown_phase_2(connection);
+       gb_connection_control_disconnected(connection);
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+err_svc_connection_destroy:
+       gb_connection_svc_connection_destroy(connection);
+err_hd_cport_clear:
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+
+       return ret;
+}
+
+int gb_connection_enable(struct gb_connection *connection)
+{
+       int ret = 0;
+
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED)
+               goto out_unlock;
+
+       ret = _gb_connection_enable(connection, true);
+       if (!ret)
+               trace_gb_connection_enable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable);
+
+int gb_connection_enable_tx(struct gb_connection *connection)
+{
+       int ret = 0;
+
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
+               goto out_unlock;
+
+       ret = _gb_connection_enable(connection, false);
+       if (!ret)
+               trace_gb_connection_enable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
+
+void gb_connection_disable_rx(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       spin_lock_irq(&connection->lock);
+       if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+               spin_unlock_irq(&connection->lock);
+               goto out_unlock;
+       }
+       connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+       gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       trace_gb_connection_disable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection)
+{
+       connection->mode_switch = true;
+}
+
+void gb_connection_mode_switch_complete(struct gb_connection *connection)
+{
+       gb_connection_svc_connection_destroy(connection);
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+
+       connection->mode_switch = false;
+}
+
+void gb_connection_disable(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED)
+               goto out_unlock;
+
+       trace_gb_connection_disable(connection);
+
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_control_disconnecting(connection);
+       gb_connection_cport_shutdown_phase_1(connection);
+       gb_connection_hd_cport_quiesce(connection);
+       gb_connection_cport_shutdown_phase_2(connection);
+       gb_connection_control_disconnected(connection);
+
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+
+       /* control-connection teardown is deferred when mode switching */
+       if (!connection->mode_switch) {
+               gb_connection_svc_connection_destroy(connection);
+               gb_connection_hd_cport_clear(connection);
+
+               gb_connection_hd_cport_disable(connection);
+       }
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable);
+
+/* Disable a connection without communicating with the remote end. */
+void gb_connection_disable_forced(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED)
+               goto out_unlock;
+
+       trace_gb_connection_disable(connection);
+
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_svc_connection_destroy(connection);
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
+
+/* Caller must have disabled the connection before destroying it. */
+void gb_connection_destroy(struct gb_connection *connection)
+{
+       if (!connection)
+               return;
+
+       if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
+               gb_connection_disable(connection);
+
+       mutex_lock(&gb_connection_mutex);
+
+       spin_lock_irq(&gb_connections_lock);
+       list_del(&connection->bundle_links);
+       list_del(&connection->hd_links);
+       spin_unlock_irq(&gb_connections_lock);
+
+       destroy_workqueue(connection->wq);
+
+       gb_hd_cport_release(connection->hd, connection->hd_cport_id);
+       connection->hd_cport_id = CPORT_ID_BAD;
+
+       mutex_unlock(&gb_connection_mutex);
+
+       gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(gb_connection_destroy);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->latency_tag_enable)
+               return;
+
+       ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to enable latency tag: %d\n",
+                       connection->name, ret);
+       }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
+
+void gb_connection_latency_tag_disable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->latency_tag_disable)
+               return;
+
+       ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to disable latency tag: %d\n",
+                       connection->name, ret);
+       }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
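The connection API implemented above is consumed by bundle drivers through connection.h (next file). A minimal lifecycle sketch, assuming a hypothetical bundle-driver helper that owns a single CPort; every name other than the gb_connection_*() calls is illustrative:

	/* Hypothetical handler: invoked for incoming requests once the connection is enabled. */
	static int example_request_handler(struct gb_operation *op)
	{
		return -EOPNOTSUPP;	/* reject all incoming requests in this sketch */
	}

	static int example_cport_init(struct gb_bundle *bundle, u16 cport_id)
	{
		struct gb_connection *connection;
		int ret;

		connection = gb_connection_create(bundle, cport_id,
						  example_request_handler);
		if (IS_ERR(connection))
			return PTR_ERR(connection);

		ret = gb_connection_enable(connection);
		if (ret) {
			/* Enable failed, so the connection is still disabled and may be destroyed. */
			gb_connection_destroy(connection);
			return ret;
		}

		/* ... exchange operations over the connection ... */

		gb_connection_disable(connection);
		gb_connection_destroy(connection);

		return 0;
	}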
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
new file mode 100644 (file)
index 0000000..4d9f4c6
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONNECTION_H
+#define __CONNECTION_H
+
+#include <linux/list.h>
+#include <linux/kfifo.h>
+
+#define GB_CONNECTION_FLAG_CSD         BIT(0)
+#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
+#define GB_CONNECTION_FLAG_OFFLOADED   BIT(2)
+#define GB_CONNECTION_FLAG_CDSI1       BIT(3)
+#define GB_CONNECTION_FLAG_CONTROL     BIT(4)
+#define GB_CONNECTION_FLAG_HIGH_PRIO   BIT(5)
+
+#define GB_CONNECTION_FLAG_CORE_MASK   GB_CONNECTION_FLAG_CONTROL
+
+enum gb_connection_state {
+       GB_CONNECTION_STATE_DISABLED            = 0,
+       GB_CONNECTION_STATE_ENABLED_TX          = 1,
+       GB_CONNECTION_STATE_ENABLED             = 2,
+       GB_CONNECTION_STATE_DISCONNECTING       = 3,
+};
+
+struct gb_operation;
+
+typedef int (*gb_request_handler_t)(struct gb_operation *);
+
+struct gb_connection {
+       struct gb_host_device           *hd;
+       struct gb_interface             *intf;
+       struct gb_bundle                *bundle;
+       struct kref                     kref;
+       u16                             hd_cport_id;
+       u16                             intf_cport_id;
+
+       struct list_head                hd_links;
+       struct list_head                bundle_links;
+
+       gb_request_handler_t            handler;
+       unsigned long                   flags;
+
+       struct mutex                    mutex;
+       spinlock_t                      lock;
+       enum gb_connection_state        state;
+       struct list_head                operations;
+
+       char                            name[16];
+       struct workqueue_struct         *wq;
+
+       atomic_t                        op_cycle;
+
+       void                            *private;
+
+       bool                            mode_switch;
+};
+
+struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
+                               u16 hd_cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
+struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
+                               u16 cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
+                               u16 cport_id, gb_request_handler_t handler,
+                               unsigned long flags);
+struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
+                               u16 cport_id, unsigned long flags);
+void gb_connection_destroy(struct gb_connection *connection);
+
+static inline bool gb_connection_is_static(struct gb_connection *connection)
+{
+       return !connection->intf;
+}
+
+int gb_connection_enable(struct gb_connection *connection);
+int gb_connection_enable_tx(struct gb_connection *connection);
+void gb_connection_disable_rx(struct gb_connection *connection);
+void gb_connection_disable(struct gb_connection *connection);
+void gb_connection_disable_forced(struct gb_connection *connection);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection);
+void gb_connection_mode_switch_complete(struct gb_connection *connection);
+
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+                       u8 *data, size_t length);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection);
+void gb_connection_latency_tag_disable(struct gb_connection *connection);
+
+static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
+{
+       return !(connection->flags & GB_CONNECTION_FLAG_CSD);
+}
+
+static inline bool
+gb_connection_flow_control_disabled(struct gb_connection *connection)
+{
+       return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
+}
+
+static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
+{
+       return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
+}
+
+static inline bool gb_connection_is_control(struct gb_connection *connection)
+{
+       return connection->flags & GB_CONNECTION_FLAG_CONTROL;
+}
+
+static inline void *gb_connection_get_data(struct gb_connection *connection)
+{
+       return connection->private;
+}
+
+static inline void gb_connection_set_data(struct gb_connection *connection,
+                                         void *data)
+{
+       connection->private = data;
+}
+
+#endif /* __CONNECTION_H */
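The flag-aware constructors above let a bundle driver request per-CPort properties at creation time, before the SVC connection is set up. A hedged sketch of how an offloaded CPort (for example a high-bandwidth data CPort) with end-to-end flow control disabled might be created; the wrapper name is assumed, only the gb_connection_create_offloaded() call and flags come from this header:

	static struct gb_connection *
	example_create_data_connection(struct gb_bundle *bundle, u16 cport_id)
	{
		/* Offloaded CDSI-1 CPort with E2E flow control disabled. */
		return gb_connection_create_offloaded(bundle, cport_id,
					GB_CONNECTION_FLAG_NO_FLOWCTRL |
					GB_CONNECTION_FLAG_CDSI1);
	}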
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
new file mode 100644 (file)
index 0000000..4716190
--- /dev/null
@@ -0,0 +1,635 @@
+/*
+ * Greybus CPort control protocol.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "greybus.h"
+
+/* Highest control-protocol version supported */
+#define GB_CONTROL_VERSION_MAJOR       0
+#define GB_CONTROL_VERSION_MINOR       1
+
+
+static int gb_control_get_version(struct gb_control *control)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_control_version_request request;
+       struct gb_control_version_response response;
+       int ret;
+
+       request.major = GB_CONTROL_VERSION_MAJOR;
+       request.minor = GB_CONTROL_VERSION_MINOR;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_VERSION,
+                               &request, sizeof(request), &response,
+                               sizeof(response));
+       if (ret) {
+               dev_err(&intf->dev,
+                               "failed to get control-protocol version: %d\n",
+                               ret);
+               return ret;
+       }
+
+       if (response.major > request.major) {
+               dev_err(&intf->dev,
+                               "unsupported major control-protocol version (%u > %u)\n",
+                               response.major, request.major);
+               return -ENOTSUPP;
+       }
+
+       control->protocol_major = response.major;
+       control->protocol_minor = response.minor;
+
+       dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
+                       response.minor);
+
+       return 0;
+}
+
+static int gb_control_get_bundle_version(struct gb_control *control,
+                                               struct gb_bundle *bundle)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_control_bundle_version_request request;
+       struct gb_control_bundle_version_response response;
+       int ret;
+
+       request.bundle_id = bundle->id;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_VERSION,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&intf->dev,
+                               "failed to get bundle %u class version: %d\n",
+                               bundle->id, ret);
+               return ret;
+       }
+
+       bundle->class_major = response.major;
+       bundle->class_minor = response.minor;
+
+       dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
+                       response.major, response.minor);
+
+       return 0;
+}
+
+int gb_control_get_bundle_versions(struct gb_control *control)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_bundle *bundle;
+       int ret;
+
+       if (!control->has_bundle_version)
+               return 0;
+
+       list_for_each_entry(bundle, &intf->bundles, links) {
+               ret = gb_control_get_bundle_version(control, bundle);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Get the manifest size from the interface */
+int gb_control_get_manifest_size_operation(struct gb_interface *intf)
+{
+       struct gb_control_get_manifest_size_response response;
+       struct gb_connection *connection = intf->control->connection;
+       int ret;
+
+       ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
+                               NULL, 0, &response, sizeof(response));
+       if (ret) {
+               dev_err(&connection->intf->dev,
+                               "failed to get manifest size: %d\n", ret);
+               return ret;
+       }
+
+       return le16_to_cpu(response.size);
+}
+
+/* Read the manifest from the interface */
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+                                     size_t size)
+{
+       struct gb_connection *connection = intf->control->connection;
+
+       return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
+                               NULL, 0, manifest, size);
+}
+
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
+{
+       struct gb_control_connected_request request;
+
+       request.cport_id = cpu_to_le16(cport_id);
+       return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
+                                &request, sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
+{
+       struct gb_control_disconnected_request request;
+
+       request.cport_id = cpu_to_le16(cport_id);
+       return gb_operation_sync(control->connection,
+                                GB_CONTROL_TYPE_DISCONNECTED, &request,
+                                sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnecting_operation(struct gb_control *control,
+                                       u16 cport_id)
+{
+       struct gb_control_disconnecting_request *request;
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(control->connection,
+                                       GB_CONTROL_TYPE_DISCONNECTING,
+                                       sizeof(*request), 0, 0,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       request = operation->request->payload;
+       request->cport_id = cpu_to_le16(cport_id);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret) {
+               dev_err(&control->dev, "failed to send disconnecting: %d\n",
+                               ret);
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+int gb_control_mode_switch_operation(struct gb_control *control)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(control->connection,
+                                       GB_CONTROL_TYPE_MODE_SWITCH,
+                                       0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret)
+               dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+                              u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+       struct gb_control_timesync_enable_request request;
+
+       request.count = count;
+       request.frame_time = cpu_to_le64(frame_time);
+       request.strobe_delay = cpu_to_le32(strobe_delay);
+       request.refclk = cpu_to_le32(refclk);
+       return gb_operation_sync(control->connection,
+                                GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
+                                sizeof(request), NULL, 0);
+}
+
+int gb_control_timesync_disable(struct gb_control *control)
+{
+       return gb_operation_sync(control->connection,
+                                GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
+                                NULL, 0);
+}
+
+int gb_control_timesync_get_last_event(struct gb_control *control,
+                                      u64 *frame_time)
+{
+       struct gb_control_timesync_get_last_event_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
+                               NULL, 0, &response, sizeof(response));
+       if (!ret)
+               *frame_time = le64_to_cpu(response.frame_time);
+       return ret;
+}
+
+int gb_control_timesync_authoritative(struct gb_control *control,
+                                     u64 *frame_time)
+{
+       struct gb_control_timesync_authoritative_request request;
+       int i;
+
+       for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+               request.frame_time[i] = cpu_to_le64(frame_time[i]);
+
+       return gb_operation_sync(control->connection,
+                                GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
+                                &request, sizeof(request),
+                                NULL, 0);
+}
+
+static int gb_control_bundle_pm_status_map(u8 status)
+{
+       switch (status) {
+       case GB_CONTROL_BUNDLE_PM_INVAL:
+               return -EINVAL;
+       case GB_CONTROL_BUNDLE_PM_BUSY:
+               return -EBUSY;
+       case GB_CONTROL_BUNDLE_PM_NA:
+               return -ENOMSG;
+       case GB_CONTROL_BUNDLE_PM_FAIL:
+       default:
+               return -EREMOTEIO;
+       }
+}
+
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
+                       bundle_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
+                       bundle_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to resume bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send bundle %u deactivate: %d\n", bundle_id,
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       if (!control->has_bundle_activate)
+               return 0;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send bundle %u activate: %d\n", bundle_id,
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to activate bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+static int gb_control_interface_pm_status_map(u8 status)
+{
+       switch (status) {
+       case GB_CONTROL_INTF_PM_BUSY:
+               return -EBUSY;
+       case GB_CONTROL_INTF_PM_NA:
+               return -ENOMSG;
+       default:
+               return -EREMOTEIO;
+       }
+}
+
+int gb_control_interface_suspend_prepare(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send interface suspend prepare: %d\n", ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while preparing suspend: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_interface_deactivate_prepare(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
+                               0, &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_interface_hibernate_abort(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send interface aborting hibernate: %d\n",
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+static ssize_t vendor_string_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
+}
+static DEVICE_ATTR_RO(vendor_string);
+
+static ssize_t product_string_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
+}
+static DEVICE_ATTR_RO(product_string);
+
+static struct attribute *control_attrs[] = {
+       &dev_attr_vendor_string.attr,
+       &dev_attr_product_string.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(control);
+
+static void gb_control_release(struct device *dev)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       gb_connection_destroy(control->connection);
+
+       kfree(control->vendor_string);
+       kfree(control->product_string);
+
+       kfree(control);
+}
+
+struct device_type greybus_control_type = {
+       .name =         "greybus_control",
+       .release =      gb_control_release,
+};
+
+struct gb_control *gb_control_create(struct gb_interface *intf)
+{
+       struct gb_connection *connection;
+       struct gb_control *control;
+
+       control = kzalloc(sizeof(*control), GFP_KERNEL);
+       if (!control)
+               return ERR_PTR(-ENOMEM);
+
+       control->intf = intf;
+
+       connection = gb_connection_create_control(intf);
+       if (IS_ERR(connection)) {
+               dev_err(&intf->dev,
+                               "failed to create control connection: %ld\n",
+                               PTR_ERR(connection));
+               kfree(control);
+               return ERR_CAST(connection);
+       }
+
+       control->connection = connection;
+
+       control->dev.parent = &intf->dev;
+       control->dev.bus = &greybus_bus_type;
+       control->dev.type = &greybus_control_type;
+       control->dev.groups = control_groups;
+       control->dev.dma_mask = intf->dev.dma_mask;
+       device_initialize(&control->dev);
+       dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
+
+       gb_connection_set_data(control->connection, control);
+
+       return control;
+}
+
+int gb_control_enable(struct gb_control *control)
+{
+       int ret;
+
+       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+       ret = gb_connection_enable_tx(control->connection);
+       if (ret) {
+               dev_err(&control->connection->intf->dev,
+                               "failed to enable control connection: %d\n",
+                               ret);
+               return ret;
+       }
+
+       ret = gb_control_get_version(control);
+       if (ret)
+               goto err_disable_connection;
+
+       if (control->protocol_major > 0 || control->protocol_minor > 1)
+               control->has_bundle_version = true;
+
+       /* FIXME: use protocol version instead */
+       if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
+               control->has_bundle_activate = true;
+
+       return 0;
+
+err_disable_connection:
+       gb_connection_disable(control->connection);
+
+       return ret;
+}
+
+void gb_control_disable(struct gb_control *control)
+{
+       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+       if (control->intf->disconnected)
+               gb_connection_disable_forced(control->connection);
+       else
+               gb_connection_disable(control->connection);
+}
+
+int gb_control_suspend(struct gb_control *control)
+{
+       gb_connection_disable(control->connection);
+
+       return 0;
+}
+
+int gb_control_resume(struct gb_control *control)
+{
+       int ret;
+
+       ret = gb_connection_enable_tx(control->connection);
+       if (ret) {
+               dev_err(&control->connection->intf->dev,
+                       "failed to enable control connection: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+int gb_control_add(struct gb_control *control)
+{
+       int ret;
+
+       ret = device_add(&control->dev);
+       if (ret) {
+               dev_err(&control->dev,
+                               "failed to register control device: %d\n",
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void gb_control_del(struct gb_control *control)
+{
+       if (device_is_registered(&control->dev))
+               device_del(&control->dev);
+}
+
+struct gb_control *gb_control_get(struct gb_control *control)
+{
+       get_device(&control->dev);
+
+       return control;
+}
+
+void gb_control_put(struct gb_control *control)
+{
+       put_device(&control->dev);
+}
+
+void gb_control_mode_switch_prepare(struct gb_control *control)
+{
+       gb_connection_mode_switch_prepare(control->connection);
+}
+
+void gb_control_mode_switch_complete(struct gb_control *control)
+{
+       gb_connection_mode_switch_complete(control->connection);
+}
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
new file mode 100644 (file)
index 0000000..f9a60da
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Greybus CPort control protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONTROL_H
+#define __CONTROL_H
+
+struct gb_control {
+       struct device dev;
+       struct gb_interface *intf;
+
+       struct gb_connection *connection;
+
+       u8 protocol_major;
+       u8 protocol_minor;
+
+       bool has_bundle_activate;
+       bool has_bundle_version;
+
+       char *vendor_string;
+       char *product_string;
+};
+#define to_gb_control(d) container_of(d, struct gb_control, dev)
+
+struct gb_control *gb_control_create(struct gb_interface *intf);
+int gb_control_enable(struct gb_control *control);
+void gb_control_disable(struct gb_control *control);
+int gb_control_suspend(struct gb_control *control);
+int gb_control_resume(struct gb_control *control);
+int gb_control_add(struct gb_control *control);
+void gb_control_del(struct gb_control *control);
+struct gb_control *gb_control_get(struct gb_control *control);
+void gb_control_put(struct gb_control *control);
+
+int gb_control_get_bundle_versions(struct gb_control *control);
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnecting_operation(struct gb_control *control,
+                                       u16 cport_id);
+int gb_control_mode_switch_operation(struct gb_control *control);
+void gb_control_mode_switch_prepare(struct gb_control *control);
+void gb_control_mode_switch_complete(struct gb_control *control);
+int gb_control_get_manifest_size_operation(struct gb_interface *intf);
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+                                     size_t size);
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+                              u64 frame_time, u32 strobe_delay, u32 refclk);
+int gb_control_timesync_disable(struct gb_control *control);
+int gb_control_timesync_get_last_event(struct gb_control *control,
+                                      u64 *frame_time);
+int gb_control_timesync_authoritative(struct gb_control *control,
+                                     u64 *frame_time);
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
+int gb_control_interface_suspend_prepare(struct gb_control *control);
+int gb_control_interface_deactivate_prepare(struct gb_control *control);
+int gb_control_interface_hibernate_abort(struct gb_control *control);
+#endif /* __CONTROL_H */
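The bundle power-management operations declared above are thin wrappers around control-connection operations; callers map their own PM hooks onto them. A hedged sketch, assuming a runtime-PM style callback in the bundle core that is not part of this file:

	static int example_bundle_runtime_suspend(struct gb_bundle *bundle)
	{
		/* Ask the interface, over its control connection, to suspend this bundle. */
		return gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	}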
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
new file mode 100644 (file)
index 0000000..1049e9c
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ * Greybus "Core"
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CREATE_TRACE_POINTS
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_BUNDLE_AUTOSUSPEND_MS       3000
+
+/* Allow greybus to be disabled at boot if needed */
+static bool nogreybus;
+#ifdef MODULE
+module_param(nogreybus, bool, 0444);
+#else
+core_param(nogreybus, nogreybus, bool, 0444);
+#endif
+int greybus_disabled(void)
+{
+       return nogreybus;
+}
+EXPORT_SYMBOL_GPL(greybus_disabled);
+
+static bool greybus_match_one_id(struct gb_bundle *bundle,
+                                    const struct greybus_bundle_id *id)
+{
+       if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
+           (id->vendor != bundle->intf->vendor_id))
+               return false;
+
+       if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
+           (id->product != bundle->intf->product_id))
+               return false;
+
+       if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
+           (id->class != bundle->class))
+               return false;
+
+       return true;
+}
+
+static const struct greybus_bundle_id *
+greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
+{
+       if (id == NULL)
+               return NULL;
+
+       for (; id->vendor || id->product || id->class || id->driver_info;
+                                                                       id++) {
+               if (greybus_match_one_id(bundle, id))
+                       return id;
+       }
+
+       return NULL;
+}
+
+static int greybus_match_device(struct device *dev, struct device_driver *drv)
+{
+       struct greybus_driver *driver = to_greybus_driver(drv);
+       struct gb_bundle *bundle;
+       const struct greybus_bundle_id *id;
+
+       if (!is_gb_bundle(dev))
+               return 0;
+
+       bundle = to_gb_bundle(dev);
+
+       id = greybus_match_id(bundle, driver->id_table);
+       if (id)
+               return 1;
+       /* FIXME - Dynamic ids? */
+       return 0;
+}
+
+static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gb_host_device *hd;
+       struct gb_module *module = NULL;
+       struct gb_interface *intf = NULL;
+       struct gb_control *control = NULL;
+       struct gb_bundle *bundle = NULL;
+       struct gb_svc *svc = NULL;
+
+       if (is_gb_host_device(dev)) {
+               hd = to_gb_host_device(dev);
+       } else if (is_gb_module(dev)) {
+               module = to_gb_module(dev);
+               hd = module->hd;
+       } else if (is_gb_interface(dev)) {
+               intf = to_gb_interface(dev);
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_control(dev)) {
+               control = to_gb_control(dev);
+               intf = control->intf;
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_bundle(dev)) {
+               bundle = to_gb_bundle(dev);
+               intf = bundle->intf;
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_svc(dev)) {
+               svc = to_gb_svc(dev);
+               hd = svc->hd;
+       } else {
+               dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
+               return -EINVAL;
+       }
+
+       if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+               return -ENOMEM;
+
+       if (module) {
+               if (add_uevent_var(env, "MODULE=%u", module->module_id))
+                       return -ENOMEM;
+       }
+
+       if (intf) {
+               if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+                       return -ENOMEM;
+               if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+                                  intf->vendor_id, intf->product_id))
+                       return -ENOMEM;
+       }
+
+       if (bundle) {
+               // FIXME
+               // add a uevent that can "load" a bundle type
+               // This is what we need to bind a driver to so use the info
+               // in gmod here as well
+
+               if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
+                       return -ENOMEM;
+               if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void greybus_shutdown(struct device *dev)
+{
+       if (is_gb_host_device(dev)) {
+               struct gb_host_device *hd;
+
+               hd = to_gb_host_device(dev);
+               gb_hd_shutdown(hd);
+       }
+}
+
+struct bus_type greybus_bus_type = {
+       .name =         "greybus",
+       .match =        greybus_match_device,
+       .uevent =       greybus_uevent,
+       .shutdown =     greybus_shutdown,
+};
+
+static int greybus_probe(struct device *dev)
+{
+       struct greybus_driver *driver = to_greybus_driver(dev->driver);
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct greybus_bundle_id *id;
+       int retval;
+
+       /* match id */
+       id = greybus_match_id(bundle, driver->id_table);
+       if (!id)
+               return -ENODEV;
+
+       retval = pm_runtime_get_sync(&bundle->intf->dev);
+       if (retval < 0) {
+               pm_runtime_put_noidle(&bundle->intf->dev);
+               return retval;
+       }
+
+       retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
+       if (retval) {
+               pm_runtime_put(&bundle->intf->dev);
+               return retval;
+       }
+
+       /*
+        * Unbound bundle devices are always deactivated. During probe,
+        * runtime PM is enabled and set active and the usage count is
+        * incremented. If the driver supports runtime PM, it should call
+        * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
+        * in its remove routine.
+        */
+       pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       retval = driver->probe(bundle, id);
+       if (retval) {
+               /*
+                * Catch buggy drivers that fail to destroy their connections.
+                */
+               WARN_ON(!list_empty(&bundle->connections));
+
+               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+               pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+               pm_runtime_put_noidle(dev);
+               pm_runtime_dont_use_autosuspend(dev);
+               pm_runtime_put(&bundle->intf->dev);
+
+               return retval;
+       }
+
+       gb_timesync_schedule_synchronous(bundle->intf);
+
+       pm_runtime_put(&bundle->intf->dev);
+
+       return 0;
+}
+
+static int greybus_remove(struct device *dev)
+{
+       struct greybus_driver *driver = to_greybus_driver(dev->driver);
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gb_connection *connection;
+       int retval;
+
+       retval = pm_runtime_get_sync(dev);
+       if (retval < 0)
+               dev_err(dev, "failed to resume bundle: %d\n", retval);
+
+       /*
+        * Disable (non-offloaded) connections early in case the interface is
+        * already gone to avoid unnecessary operation timeouts during
+        * driver disconnect. Otherwise, only disable incoming requests.
+        */
+       list_for_each_entry(connection, &bundle->connections, bundle_links) {
+               if (gb_connection_is_offloaded(connection))
+                       continue;
+
+               if (bundle->intf->disconnected)
+                       gb_connection_disable_forced(connection);
+               else
+                       gb_connection_disable_rx(connection);
+       }
+
+       driver->disconnect(bundle);
+
+       /* Catch buggy drivers that fail to destroy their connections. */
+       WARN_ON(!list_empty(&bundle->connections));
+
+       if (!bundle->intf->disconnected)
+               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+       pm_runtime_put_noidle(dev);
+
+       return 0;
+}
+
+int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
+               const char *mod_name)
+{
+       int retval;
+
+       if (greybus_disabled())
+               return -ENODEV;
+
+       driver->driver.bus = &greybus_bus_type;
+       driver->driver.name = driver->name;
+       driver->driver.probe = greybus_probe;
+       driver->driver.remove = greybus_remove;
+       driver->driver.owner = owner;
+       driver->driver.mod_name = mod_name;
+
+       retval = driver_register(&driver->driver);
+       if (retval)
+               return retval;
+
+       pr_info("registered new driver %s\n", driver->name);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(greybus_register_driver);
+
+void greybus_deregister_driver(struct greybus_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(greybus_deregister_driver);
+
+static int __init gb_init(void)
+{
+       int retval;
+
+       if (greybus_disabled())
+               return -ENODEV;
+
+       BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
+
+       gb_debugfs_init();
+
+       retval = bus_register(&greybus_bus_type);
+       if (retval) {
+               pr_err("bus_register failed (%d)\n", retval);
+               goto error_bus;
+       }
+
+       retval = gb_hd_init();
+       if (retval) {
+               pr_err("gb_hd_init failed (%d)\n", retval);
+               goto error_hd;
+       }
+
+       retval = gb_operation_init();
+       if (retval) {
+               pr_err("gb_operation_init failed (%d)\n", retval);
+               goto error_operation;
+       }
+
+       retval = gb_timesync_init();
+       if (retval) {
+               pr_err("gb_timesync_init failed (%d)\n", retval);
+               goto error_timesync;
+       }
+       return 0;       /* Success */
+
+error_timesync:
+       gb_operation_exit();
+error_operation:
+       gb_hd_exit();
+error_hd:
+       bus_unregister(&greybus_bus_type);
+error_bus:
+       gb_debugfs_cleanup();
+
+       return retval;
+}
+module_init(gb_init);
+
+static void __exit gb_exit(void)
+{
+       gb_timesync_exit();
+       gb_operation_exit();
+       gb_hd_exit();
+       bus_unregister(&greybus_bus_type);
+       gb_debugfs_cleanup();
+       tracepoint_synchronize_unregister();
+}
+module_exit(gb_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
new file mode 100644 (file)
index 0000000..a9d4d3d
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Greybus debugfs code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+
+#include "greybus.h"
+
+static struct dentry *gb_debug_root;
+
+void __init gb_debugfs_init(void)
+{
+       gb_debug_root = debugfs_create_dir("greybus", NULL);
+}
+
+void gb_debugfs_cleanup(void)
+{
+       debugfs_remove_recursive(gb_debug_root);
+       gb_debug_root = NULL;
+}
+
+struct dentry *gb_debugfs_get(void)
+{
+       return gb_debug_root;
+}
+EXPORT_SYMBOL_GPL(gb_debugfs_get);
diff --git a/drivers/staging/greybus/devices b/drivers/staging/greybus/devices
new file mode 100644 (file)
index 0000000..486bba8
--- /dev/null
@@ -0,0 +1,11 @@
+T:  Bus=01 Lev=03 Prnt=07 Port=02 Cnt=03 Dev#= 12 Spd=12   MxCh= 0
+D:  Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs=  1
+P:  Vendor=ffff ProdID=0001 Rev= 1.00
+S:  Manufacturer=Greybus
+S:  Product=SVC Bridge
+S:  SerialNumber=12239
+C:* #Ifs= 1 Cfg#= 1 Atr=c0 MxPwr=100mA
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=es1_ap_driver
+E:  Ad=81(I) Atr=03(Int.) MxPS=  64 Ivl=64ms
+E:  Ad=82(I) Atr=02(Bulk) MxPS=  64 Ivl=0ms
+E:  Ad=02(O) Atr=02(Bulk) MxPS=  64 Ivl=0ms
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
new file mode 100644 (file)
index 0000000..6cb13fc
--- /dev/null
@@ -0,0 +1,1598 @@
+/*
+ * Greybus "AP" USB driver for "ES2" controller chips
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kthread.h>
+#include <linux/sizes.h>
+#include <linux/usb.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <asm/unaligned.h>
+
+#include "arpc.h"
+#include "greybus.h"
+#include "greybus_trace.h"
+#include "kernel_ver.h"
+#include "connection.h"
+
+
+/* Default timeout for USB vendor requests. */
+#define ES2_USB_CTRL_TIMEOUT   500
+
+/* Default timeout for ARPC CPort requests */
+#define ES2_ARPC_CPORT_TIMEOUT 500
+
+/* Fixed CPort numbers */
+#define ES2_CPORT_CDSI0                16
+#define ES2_CPORT_CDSI1                17
+
+/* Memory sizes for the buffers sent to/from the ES2 controller */
+#define ES2_GBUF_MSG_SIZE_MAX  2048
+
+/* Memory sizes for the ARPC buffers */
+#define ARPC_OUT_SIZE_MAX      U16_MAX
+#define ARPC_IN_SIZE_MAX       128
+
+static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x18d1, 0x1eaf) },
+       { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+#define APB1_LOG_SIZE          SZ_16K
+
+/*
+ * Number of CPort IN urbs in flight at any point in time.
+ * Adjust if we are having stalls in the USB buffer due to not enough urbs in
+ * flight.
+ */
+#define NUM_CPORT_IN_URB       4
+
+/*
+ * Number of CPort OUT urbs in flight at any point in time.
+ * Adjust if we get messages saying we are out of urbs in the system log.
+ */
+#define NUM_CPORT_OUT_URB      8
+
+/*
+ * Number of ARPC in urbs in flight at any point in time.
+ */
+#define NUM_ARPC_IN_URB                2
+
+/*
+ * @endpoint: bulk in endpoint for CPort data
+ * @urb: array of urbs for the CPort in messages
+ * @buffer: array of buffers for the @cport_in_urb urbs
+ */
+struct es2_cport_in {
+       __u8 endpoint;
+       struct urb *urb[NUM_CPORT_IN_URB];
+       u8 *buffer[NUM_CPORT_IN_URB];
+};
+
+/**
+ * es2_ap_dev - ES2 USB Bridge to AP structure
+ * @usb_dev: pointer to the USB device we are attached to.
+ * @usb_intf: pointer to the USB interface we are bound to.
+ * @hd: pointer to our gb_host_device structure
+ *
+ * @cport_in: endpoint, urbs and buffers for cport in messages
+ * @cport_out_endpoint: endpoint for cport out messages
+ * @cport_out_urb: array of urbs for the CPort out messages
+ * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
+ *                     not.
+ * @cport_out_urb_cancelled: array of flags indicating whether the
+ *                     corresponding @cport_out_urb is being cancelled
+ * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
+ *
+ * @apb_log_task: task pointer for logging thread
+ * @apb_log_dentry: file system entry for the log file interface
+ * @apb_log_enable_dentry: file system entry for enabling logging
+ * @apb_log_fifo: kernel FIFO to carry logged data
+ * @arpc_urb: array of urbs for the ARPC in messages
+ * @arpc_buffer: array of buffers for the @arpc_urb urbs
+ * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
+ * @arpc_id_cycle: gives a unique id to each ARPC
+ * @arpc_lock: locks ARPC list
+ * @arpcs: list of in progress ARPCs
+ */
+struct es2_ap_dev {
+       struct usb_device *usb_dev;
+       struct usb_interface *usb_intf;
+       struct gb_host_device *hd;
+
+       struct es2_cport_in cport_in;
+       __u8 cport_out_endpoint;
+       struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
+       bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
+       bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
+       spinlock_t cport_out_urb_lock;
+
+       bool cdsi1_in_use;
+
+       struct task_struct *apb_log_task;
+       struct dentry *apb_log_dentry;
+       struct dentry *apb_log_enable_dentry;
+       DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
+
+       __u8 arpc_endpoint_in;
+       struct urb *arpc_urb[NUM_ARPC_IN_URB];
+       u8 *arpc_buffer[NUM_ARPC_IN_URB];
+
+       int arpc_id_cycle;
+       spinlock_t arpc_lock;
+       struct list_head arpcs;
+};
+
+/**
+ * timesync_enable_request - Enable timesync in an APBridge
+ * @count: number of TimeSync Pulses to expect
+ * @frame_time: the initial FrameTime at the first TimeSync Pulse
+ * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
+ * @refclk: The AP mandated reference clock to run FrameTime at
+ */
+struct timesync_enable_request {
+       __u8    count;
+       __le64  frame_time;
+       __le32  strobe_delay;
+       __le32  refclk;
+} __packed;
+
+/**
+ * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
+ * @frame_time: An array of authoritative FrameTimes provided by the SVC
+ *              and relayed to the APBridge by the AP
+ */
+struct timesync_authoritative_request {
+       __le64  frame_time[GB_TIMESYNC_MAX_STROBES];
+} __packed;
+
+struct arpc {
+       struct list_head list;
+       struct arpc_request_message *req;
+       struct arpc_response_message *resp;
+       struct completion response_received;
+       bool active;
+};
+
+static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
+{
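+       /* hd_priv is sized via es2_driver.hd_priv_size below. */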
+       return (struct es2_ap_dev *)&hd->hd_priv;
+}
+
+static void cport_out_callback(struct urb *urb);
+static void usb_log_enable(struct es2_ap_dev *es2);
+static void usb_log_disable(struct es2_ap_dev *es2);
+static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
+                    size_t size, int *result, unsigned int timeout);
+
+static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
+{
+       struct usb_device *udev = es2->usb_dev;
+       u8 *data;
+       int retval;
+
+       data = kmalloc(size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       memcpy(data, req, size);
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                cmd,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE,
+                                0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
+       if (retval < 0)
+               dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
+       else
+               retval = 0;
+
+       kfree(data);
+       return retval;
+}
+
+static void ap_urb_complete(struct urb *urb)
+{
+       struct usb_ctrlrequest *dr = urb->context;
+
+       kfree(dr);
+       usb_free_urb(urb);
+}
+
+static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
+{
+       struct usb_device *udev = es2->usb_dev;
+       struct urb *urb;
+       struct usb_ctrlrequest *dr;
+       u8 *buf;
+       int retval;
+
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb)
+               return -ENOMEM;
+
+       dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
+       if (!dr) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       buf = (u8 *)dr + sizeof(*dr);
+       memcpy(buf, req, size);
+
+       dr->bRequest = cmd;
+       dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
+       dr->wValue = 0;
+       dr->wIndex = 0;
+       dr->wLength = cpu_to_le16(size);
+
+       usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
+                            (unsigned char *)dr, buf, size,
+                            ap_urb_complete, dr);
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval) {
+               usb_free_urb(urb);
+               kfree(dr);
+       }
+       return retval;
+}
+
+static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+                    bool async)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+
+       if (async)
+               return output_async(es2, req, size, cmd);
+
+       return output_sync(es2, req, size, cmd);
+}
+
+static int es2_cport_in_enable(struct es2_ap_dev *es2,
+                               struct es2_cport_in *cport_in)
+{
+       struct urb *urb;
+       int ret;
+       int i;
+
+       for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+               urb = cport_in->urb[i];
+
+               ret = usb_submit_urb(urb, GFP_KERNEL);
+               if (ret) {
+                       dev_err(&es2->usb_dev->dev,
+                                       "failed to submit in-urb: %d\n", ret);
+                       goto err_kill_urbs;
+               }
+       }
+
+       return 0;
+
+err_kill_urbs:
+       for (--i; i >= 0; --i) {
+               urb = cport_in->urb[i];
+               usb_kill_urb(urb);
+       }
+
+       return ret;
+}
+
+static void es2_cport_in_disable(struct es2_ap_dev *es2,
+                               struct es2_cport_in *cport_in)
+{
+       struct urb *urb;
+       int i;
+
+       for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+               urb = cport_in->urb[i];
+               usb_kill_urb(urb);
+       }
+}
+
+static int es2_arpc_in_enable(struct es2_ap_dev *es2)
+{
+       struct urb *urb;
+       int ret;
+       int i;
+
+       for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+               urb = es2->arpc_urb[i];
+
+               ret = usb_submit_urb(urb, GFP_KERNEL);
+               if (ret) {
+                       dev_err(&es2->usb_dev->dev,
+                               "failed to submit arpc in-urb: %d\n", ret);
+                       goto err_kill_urbs;
+               }
+       }
+
+       return 0;
+
+err_kill_urbs:
+       for (--i; i >= 0; --i) {
+               urb = es2->arpc_urb[i];
+               usb_kill_urb(urb);
+       }
+
+       return ret;
+}
+
+static void es2_arpc_in_disable(struct es2_ap_dev *es2)
+{
+       struct urb *urb;
+       int i;
+
+       for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+               urb = es2->arpc_urb[i];
+               usb_kill_urb(urb);
+       }
+}
+
+static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
+{
+       struct urb *urb = NULL;
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+
+       /* Look in our pool of allocated urbs first, as that's the "fastest" */
+       for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+               if (es2->cport_out_urb_busy[i] == false &&
+                               es2->cport_out_urb_cancelled[i] == false) {
+                       es2->cport_out_urb_busy[i] = true;
+                       urb = es2->cport_out_urb[i];
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+       if (urb)
+               return urb;
+
+       /*
+        * Crap, pool is empty, complain to the syslog and go allocate one
+        * dynamically as we have to succeed.
+        */
+       dev_dbg(&es2->usb_dev->dev,
+               "No free CPort OUT urbs, having to dynamically allocate one!\n");
+       return usb_alloc_urb(0, gfp_mask);
+}
+
+static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
+{
+       unsigned long flags;
+       int i;
+       /*
+        * See if this was an urb in our pool, if so mark it "free", otherwise
+        * we need to free it ourselves.
+        */
+       spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+       for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+               if (urb == es2->cport_out_urb[i]) {
+                       es2->cport_out_urb_busy[i] = false;
+                       urb = NULL;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+       /* If urb is not NULL, then we need to free this urb */
+       usb_free_urb(urb);
+}
+
+/*
+ * We (ab)use the operation-message header pad bytes to transfer the
+ * cport id in order to minimise overhead.
+ */
+static void
+gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
+{
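+       /*
+        * CPort ids are capped at U8_MAX (see apb_get_cport_count()), so the
+        * id always fits in a single pad byte.
+        */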
+       header->pad[0] = cport_id;
+}
+
+/* Clear the pad bytes used for the CPort id */
+static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
+{
+       header->pad[0] = 0;
+}
+
+/* Extract the CPort id packed into the header, and clear it */
+static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
+{
+       u16 cport_id = header->pad[0];
+
+       gb_message_cport_clear(header);
+
+       return cport_id;
+}
+
+/*
+ * Returns zero if the message was successfully queued, or a negative errno
+ * otherwise.
+ */
+static int message_send(struct gb_host_device *hd, u16 cport_id,
+                       struct gb_message *message, gfp_t gfp_mask)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+       size_t buffer_size;
+       int retval;
+       struct urb *urb;
+       unsigned long flags;
+
+       /*
+        * The data actually transferred will include an indication
+        * of where the data should be sent.  Do one last check of
+        * the target CPort id before filling it in.
+        */
+       if (!cport_id_valid(hd, cport_id)) {
+               dev_err(&udev->dev, "invalid cport %u\n", cport_id);
+               return -EINVAL;
+       }
+
+       /* Find a free urb */
+       urb = next_free_urb(es2, gfp_mask);
+       if (!urb)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+       message->hcpriv = urb;
+       spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+       /* Pack the cport id into the message header */
+       gb_message_cport_pack(message->header, cport_id);
+
+       buffer_size = sizeof(*message->header) + message->payload_size;
+
+       usb_fill_bulk_urb(urb, udev,
+                         usb_sndbulkpipe(udev,
+                                         es2->cport_out_endpoint),
+                         message->buffer, buffer_size,
+                         cport_out_callback, message);
+       urb->transfer_flags |= URB_ZERO_PACKET;
+
+       trace_gb_message_submit(message);
+
+       retval = usb_submit_urb(urb, gfp_mask);
+       if (retval) {
+               dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
+
+               spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+               message->hcpriv = NULL;
+               spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+               free_urb(es2, urb);
+               gb_message_cport_clear(message->header);
+
+               return retval;
+       }
+
+       return 0;
+}
+
+/*
+ * Can not be called in atomic context.
+ */
+static void message_cancel(struct gb_message *message)
+{
+       struct gb_host_device *hd = message->operation->connection->hd;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct urb *urb;
+       int i;
+
+       might_sleep();
+
+       spin_lock_irq(&es2->cport_out_urb_lock);
+       urb = message->hcpriv;
+
+       /* Prevent dynamically allocated urb from being deallocated. */
+       usb_get_urb(urb);
+
+       /* Prevent pre-allocated urb from being reused. */
+       for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+               if (urb == es2->cport_out_urb[i]) {
+                       es2->cport_out_urb_cancelled[i] = true;
+                       break;
+               }
+       }
+       spin_unlock_irq(&es2->cport_out_urb_lock);
+
+       usb_kill_urb(urb);
+
+       if (i < NUM_CPORT_OUT_URB) {
+               spin_lock_irq(&es2->cport_out_urb_lock);
+               es2->cport_out_urb_cancelled[i] = false;
+               spin_unlock_irq(&es2->cport_out_urb_lock);
+       }
+
+       usb_free_urb(urb);
+}
+
+static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
+                               unsigned long flags)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct ida *id_map = &hd->cport_id_map;
+       int ida_start, ida_end;
+
+       switch (cport_id) {
+       case ES2_CPORT_CDSI0:
+       case ES2_CPORT_CDSI1:
+               dev_err(&hd->dev, "cport %d not available\n", cport_id);
+               return -EBUSY;
+       }
+
+       if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
+                       flags & GB_CONNECTION_FLAG_CDSI1) {
+               if (es2->cdsi1_in_use) {
+                       dev_err(&hd->dev, "CDSI1 already in use\n");
+                       return -EBUSY;
+               }
+
+               es2->cdsi1_in_use = true;
+
+               return ES2_CPORT_CDSI1;
+       }
+
+       if (cport_id < 0) {
+               ida_start = 0;
+               ida_end = hd->num_cports;
+       } else if (cport_id < hd->num_cports) {
+               ida_start = cport_id;
+               ida_end = cport_id + 1;
+       } else {
+               dev_err(&hd->dev, "cport %d not available\n", cport_id);
+               return -EINVAL;
+       }
+
+       return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+}
+
+static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+
+       switch (cport_id) {
+       case ES2_CPORT_CDSI1:
+               es2->cdsi1_in_use = false;
+               return;
+       }
+
+       ida_simple_remove(&hd->cport_id_map, cport_id);
+}
+
+static int cport_enable(struct gb_host_device *hd, u16 cport_id,
+                       unsigned long flags)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+       struct gb_apb_request_cport_flags *req;
+       u32 connection_flags;
+       int ret;
+
+       req = kzalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       connection_flags = 0;
+       if (flags & GB_CONNECTION_FLAG_CONTROL)
+               connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
+       if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
+               connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;
+
+       req->flags = cpu_to_le32(connection_flags);
+
+       dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
+                       cport_id, connection_flags);
+
+       ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                               GB_APB_REQUEST_CPORT_FLAGS,
+                               USB_DIR_OUT | USB_TYPE_VENDOR |
+                               USB_RECIP_INTERFACE, cport_id, 0,
+                               req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
+       if (ret != sizeof(*req)) {
+               dev_err(&udev->dev, "failed to set cport flags for port %d\n",
+                               cport_id);
+               if (ret >= 0)
+                       ret = -EIO;
+
+               goto out;
+       }
+
+       ret = 0;
+out:
+       kfree(req);
+
+       return ret;
+}
+
+static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct device *dev = &es2->usb_dev->dev;
+       struct arpc_cport_connected_req req;
+       int ret;
+
+       req.cport_id = cpu_to_le16(cport_id);
+       ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
+                       NULL, ES2_ARPC_CPORT_TIMEOUT);
+       if (ret) {
+               dev_err(dev, "failed to set connected state for cport %u: %d\n",
+                               cport_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct device *dev = &es2->usb_dev->dev;
+       struct arpc_cport_flush_req req;
+       int ret;
+
+       req.cport_id = cpu_to_le16(cport_id);
+       ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
+                       NULL, ES2_ARPC_CPORT_TIMEOUT);
+       if (ret) {
+               dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
+                               u8 phase, unsigned int timeout)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct device *dev = &es2->usb_dev->dev;
+       struct arpc_cport_shutdown_req req;
+       int result;
+       int ret;
+
+       if (timeout > U16_MAX)
+               return -EINVAL;
+
+       req.cport_id = cpu_to_le16(cport_id);
+       req.timeout = cpu_to_le16(timeout);
+       req.phase = phase;
+       ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
+                       &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
+       if (ret) {
+               dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
+                               cport_id, ret, result);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
+                               size_t peer_space, unsigned int timeout)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct device *dev = &es2->usb_dev->dev;
+       struct arpc_cport_quiesce_req req;
+       int result;
+       int ret;
+
+       if (peer_space > U16_MAX)
+               return -EINVAL;
+
+       if (timeout > U16_MAX)
+               return -EINVAL;
+
+       req.cport_id = cpu_to_le16(cport_id);
+       req.peer_space = cpu_to_le16(peer_space);
+       req.timeout = cpu_to_le16(timeout);
+       ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
+                       &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
+       if (ret) {
+               dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
+                               cport_id, ret, result);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
+{
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct device *dev = &es2->usb_dev->dev;
+       struct arpc_cport_clear_req req;
+       int ret;
+
+       req.cport_id = cpu_to_le16(cport_id);
+       ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
+                       NULL, ES2_ARPC_CPORT_TIMEOUT);
+       if (ret) {
+               dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
+{
+       int retval;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_LATENCY_TAG_EN,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, cport_id, 0, NULL,
+                                0, ES2_USB_CTRL_TIMEOUT);
+
+       if (retval < 0)
+               dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
+                       cport_id);
+       return retval;
+}
+
+static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
+{
+       int retval;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_LATENCY_TAG_DIS,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, cport_id, 0, NULL,
+                                0, ES2_USB_CTRL_TIMEOUT);
+
+       if (retval < 0)
+               dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
+                       cport_id);
+       return retval;
+}
+
+static int timesync_enable(struct gb_host_device *hd, u8 count,
+                          u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+       int retval;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+       struct gb_control_timesync_enable_request *request;
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       request->count = count;
+       request->frame_time = cpu_to_le64(frame_time);
+       request->strobe_delay = cpu_to_le32(strobe_delay);
+       request->refclk = cpu_to_le32(refclk);
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_TIMESYNC_ENABLE,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, 0, 0, request,
+                                sizeof(*request), ES2_USB_CTRL_TIMEOUT);
+       if (retval < 0)
+               dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
+
+       kfree(request);
+       return retval;
+}
+
+static int timesync_disable(struct gb_host_device *hd)
+{
+       int retval;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_TIMESYNC_DISABLE,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, 0, 0, NULL,
+                                0, ES2_USB_CTRL_TIMEOUT);
+       if (retval < 0)
+               dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
+
+       return retval;
+}
+
+static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
+{
+       int retval, i;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+       struct timesync_authoritative_request *request;
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+               request->frame_time[i] = cpu_to_le64(frame_time[i]);
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, 0, 0, request,
+                                sizeof(*request), ES2_USB_CTRL_TIMEOUT);
+       if (retval < 0)
+               dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
+
+       kfree(request);
+       return retval;
+}
+
+static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
+{
+       int retval;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       struct usb_device *udev = es2->usb_dev;
+       __le64 *response_frame_time;
+
+       response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
+       if (!response_frame_time)
+               return -ENOMEM;
+
+       retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                                GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
+                                USB_DIR_IN | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, 0, 0, response_frame_time,
+                                sizeof(*response_frame_time),
+                                ES2_USB_CTRL_TIMEOUT);
+
+       if (retval != sizeof(*response_frame_time)) {
+               dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
+                       retval);
+
+               if (retval >= 0)
+                       retval = -EIO;
+
+               goto out;
+       }
+       *frame_time = le64_to_cpu(*response_frame_time);
+       retval = 0;
+out:
+       kfree(response_frame_time);
+       return retval;
+}
+
+static struct gb_hd_driver es2_driver = {
+       .hd_priv_size                   = sizeof(struct es2_ap_dev),
+       .message_send                   = message_send,
+       .message_cancel                 = message_cancel,
+       .cport_allocate                 = es2_cport_allocate,
+       .cport_release                  = es2_cport_release,
+       .cport_enable                   = cport_enable,
+       .cport_connected                = es2_cport_connected,
+       .cport_flush                    = es2_cport_flush,
+       .cport_shutdown                 = es2_cport_shutdown,
+       .cport_quiesce                  = es2_cport_quiesce,
+       .cport_clear                    = es2_cport_clear,
+       .latency_tag_enable             = latency_tag_enable,
+       .latency_tag_disable            = latency_tag_disable,
+       .output                         = output,
+       .timesync_enable                = timesync_enable,
+       .timesync_disable               = timesync_disable,
+       .timesync_authoritative         = timesync_authoritative,
+       .timesync_get_last_event        = timesync_get_last_event,
+};
+
+/* Common function to report consistent warnings based on URB status */
+static int check_urb_status(struct urb *urb)
+{
+       struct device *dev = &urb->dev->dev;
+       int status = urb->status;
+
+       switch (status) {
+       case 0:
+               return 0;
+
+       case -EOVERFLOW:
+               dev_err(dev, "%s: overflow actual length is %d\n",
+                       __func__, urb->actual_length);
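+               /* fall through */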
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+       case -EILSEQ:
+       case -EPROTO:
+               /* device is gone, stop sending */
+               return status;
+       }
+       dev_err(dev, "%s: unknown status %d\n", __func__, status);
+
+       return -EAGAIN;
+}
+
+static void es2_destroy(struct es2_ap_dev *es2)
+{
+       struct usb_device *udev;
+       struct urb *urb;
+       int i;
+
+       debugfs_remove(es2->apb_log_enable_dentry);
+       usb_log_disable(es2);
+
+       /* Tear down everything! */
+       for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+               urb = es2->cport_out_urb[i];
+               usb_kill_urb(urb);
+               usb_free_urb(urb);
+               es2->cport_out_urb[i] = NULL;
+               es2->cport_out_urb_busy[i] = false;     /* just to be anal */
+       }
+
+       for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+               usb_free_urb(es2->arpc_urb[i]);
+               kfree(es2->arpc_buffer[i]);
+               es2->arpc_buffer[i] = NULL;
+       }
+
+       for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+               usb_free_urb(es2->cport_in.urb[i]);
+               kfree(es2->cport_in.buffer[i]);
+               es2->cport_in.buffer[i] = NULL;
+       }
+
+       /* release reserved CDSI0 and CDSI1 cports */
+       gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
+       gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
+
+       udev = es2->usb_dev;
+       gb_hd_put(es2->hd);
+
+       usb_put_dev(udev);
+}
+
+static void cport_in_callback(struct urb *urb)
+{
+       struct gb_host_device *hd = urb->context;
+       struct device *dev = &urb->dev->dev;
+       struct gb_operation_msg_hdr *header;
+       int status = check_urb_status(urb);
+       int retval;
+       u16 cport_id;
+
+       if (status) {
+               if ((status == -EAGAIN) || (status == -EPROTO))
+                       goto exit;
+
+               /* The urb is being unlinked */
+               if (status == -ENOENT || status == -ESHUTDOWN)
+                       return;
+
+               dev_err(dev, "urb cport in error %d (dropped)\n", status);
+               return;
+       }
+
+       if (urb->actual_length < sizeof(*header)) {
+               dev_err(dev, "short message received\n");
+               goto exit;
+       }
+
+       /* Extract the CPort id, which is packed in the message header */
+       header = urb->transfer_buffer;
+       cport_id = gb_message_cport_unpack(header);
+
+       if (cport_id_valid(hd, cport_id)) {
+               greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
+                                                       urb->actual_length);
+       } else {
+               dev_err(dev, "invalid cport id %u received\n", cport_id);
+       }
+exit:
+       /* put our urb back in the request pool */
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval)
+               dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
+}
+
+static void cport_out_callback(struct urb *urb)
+{
+       struct gb_message *message = urb->context;
+       struct gb_host_device *hd = message->operation->connection->hd;
+       struct es2_ap_dev *es2 = hd_to_es2(hd);
+       int status = check_urb_status(urb);
+       unsigned long flags;
+
+       gb_message_cport_clear(message->header);
+
+       spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+       message->hcpriv = NULL;
+       spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+       /*
+        * Tell the submitter that the message send (attempt) is
+        * complete, and report the status.
+        */
+       greybus_message_sent(hd, message, status);
+
+       free_urb(es2, urb);
+}
+
+static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
+{
+       struct arpc *rpc;
+
+       if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
+               return NULL;
+
+       rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+       if (!rpc)
+               return NULL;
+
+       INIT_LIST_HEAD(&rpc->list);
+       rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
+       if (!rpc->req)
+               goto err_free_rpc;
+
+       rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
+       if (!rpc->resp)
+               goto err_free_req;
+
+       rpc->req->type = type;
+       rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
+       memcpy(rpc->req->data, payload, size);
+
+       init_completion(&rpc->response_received);
+
+       return rpc;
+
+err_free_req:
+       kfree(rpc->req);
+err_free_rpc:
+       kfree(rpc);
+
+       return NULL;
+}
+
+static void arpc_free(struct arpc *rpc)
+{
+       kfree(rpc->req);
+       kfree(rpc->resp);
+       kfree(rpc);
+}
+
+static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
+{
+       struct arpc *rpc;
+
+       list_for_each_entry(rpc, &es2->arpcs, list) {
+               if (rpc->req->id == id)
+                       return rpc;
+       }
+
+       return NULL;
+}
+
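+/* Both arpc_add() and arpc_del() must be called with es2->arpc_lock held. */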
+static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
+{
+       rpc->active = true;
+       rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
+       list_add_tail(&rpc->list, &es2->arpcs);
+}
+
+static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
+{
+       if (rpc->active) {
+               rpc->active = false;
+               list_del(&rpc->list);
+       }
+}
+
+static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
+{
+       struct usb_device *udev = es2->usb_dev;
+       int retval;
+
+       retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                                GB_APB_REQUEST_ARPC_RUN,
+                                USB_DIR_OUT | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE,
+                                0, 0,
+                                rpc->req, le16_to_cpu(rpc->req->size),
+                                ES2_USB_CTRL_TIMEOUT);
+       if (retval != le16_to_cpu(rpc->req->size)) {
+               dev_err(&udev->dev,
+                       "failed to send ARPC request %d: %d\n",
+                       rpc->req->type, retval);
+               if (retval > 0)
+                       retval = -EIO;
+               return retval;
+       }
+
+       return 0;
+}
+
+static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
+                    size_t size, int *result, unsigned int timeout)
+{
+       struct arpc *rpc;
+       unsigned long flags;
+       int retval;
+
+       if (result)
+               *result = 0;
+
+       rpc = arpc_alloc(payload, size, type);
+       if (!rpc)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&es2->arpc_lock, flags);
+       arpc_add(es2, rpc);
+       spin_unlock_irqrestore(&es2->arpc_lock, flags);
+
+       retval = arpc_send(es2, rpc, timeout);
+       if (retval)
+               goto out_arpc_del;
+
+       retval = wait_for_completion_interruptible_timeout(
+                                               &rpc->response_received,
+                                               msecs_to_jiffies(timeout));
+       if (retval <= 0) {
+               if (!retval)
+                       retval = -ETIMEDOUT;
+               goto out_arpc_del;
+       }
+
+       if (rpc->resp->result) {
+               retval = -EREMOTEIO;
+               if (result)
+                       *result = rpc->resp->result;
+       } else {
+               retval = 0;
+       }
+
+out_arpc_del:
+       spin_lock_irqsave(&es2->arpc_lock, flags);
+       arpc_del(es2, rpc);
+       spin_unlock_irqrestore(&es2->arpc_lock, flags);
+       arpc_free(rpc);
+
+       if (retval < 0 && retval != -EREMOTEIO) {
+               dev_err(&es2->usb_dev->dev,
+                       "failed to execute ARPC: %d\n", retval);
+       }
+
+       return retval;
+}
+
+static void arpc_in_callback(struct urb *urb)
+{
+       struct es2_ap_dev *es2 = urb->context;
+       struct device *dev = &urb->dev->dev;
+       int status = check_urb_status(urb);
+       struct arpc *rpc;
+       struct arpc_response_message *resp;
+       unsigned long flags;
+       int retval;
+
+       if (status) {
+               if ((status == -EAGAIN) || (status == -EPROTO))
+                       goto exit;
+
+               /* The urb is being unlinked */
+               if (status == -ENOENT || status == -ESHUTDOWN)
+                       return;
+
+               dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
+               return;
+       }
+
+       if (urb->actual_length < sizeof(*resp)) {
+               dev_err(dev, "short arpc response received\n");
+               goto exit;
+       }
+
+       resp = urb->transfer_buffer;
+       spin_lock_irqsave(&es2->arpc_lock, flags);
+       rpc = arpc_find(es2, resp->id);
+       if (!rpc) {
+               dev_err(dev, "invalid arpc response id received: %u\n",
+                       le16_to_cpu(resp->id));
+               spin_unlock_irqrestore(&es2->arpc_lock, flags);
+               goto exit;
+       }
+
+       arpc_del(es2, rpc);
+       memcpy(rpc->resp, resp, sizeof(*resp));
+       complete(&rpc->response_received);
+       spin_unlock_irqrestore(&es2->arpc_lock, flags);
+
+exit:
+       /* put our urb back in the request pool */
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval)
+               dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
+}
+
+#define APB1_LOG_MSG_SIZE      64
+static void apb_log_get(struct es2_ap_dev *es2, char *buf)
+{
+       int retval;
+
+       do {
+               retval = usb_control_msg(es2->usb_dev,
+                                       usb_rcvctrlpipe(es2->usb_dev, 0),
+                                       GB_APB_REQUEST_LOG,
+                                       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+                                       0x00, 0x00,
+                                       buf,
+                                       APB1_LOG_MSG_SIZE,
+                                       ES2_USB_CTRL_TIMEOUT);
+               if (retval > 0)
+                       kfifo_in(&es2->apb_log_fifo, buf, retval);
+       } while (retval > 0);
+}
+
+static int apb_log_poll(void *data)
+{
+       struct es2_ap_dev *es2 = data;
+       char *buf;
+
+       buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       while (!kthread_should_stop()) {
+               msleep(1000);
+               apb_log_get(es2, buf);
+       }
+
+       kfree(buf);
+
+       return 0;
+}
+
+static ssize_t apb_log_read(struct file *f, char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct es2_ap_dev *es2 = f->f_inode->i_private;
+       ssize_t ret;
+       size_t copied;
+       char *tmp_buf;
+
+       if (count > APB1_LOG_SIZE)
+               count = APB1_LOG_SIZE;
+
+       tmp_buf = kmalloc(count, GFP_KERNEL);
+       if (!tmp_buf)
+               return -ENOMEM;
+
+       copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
+       ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
+
+       kfree(tmp_buf);
+
+       return ret;
+}
+
+static const struct file_operations apb_log_fops = {
+       .read   = apb_log_read,
+};
+
+static void usb_log_enable(struct es2_ap_dev *es2)
+{
+       if (!IS_ERR_OR_NULL(es2->apb_log_task))
+               return;
+
+       /* get log from APB1 */
+       es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
+       if (IS_ERR(es2->apb_log_task))
+               return;
+       /* XXX We will need to rename this per APB */
+       es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
+                                               gb_debugfs_get(), es2,
+                                               &apb_log_fops);
+}
+
+static void usb_log_disable(struct es2_ap_dev *es2)
+{
+       if (IS_ERR_OR_NULL(es2->apb_log_task))
+               return;
+
+       debugfs_remove(es2->apb_log_dentry);
+       es2->apb_log_dentry = NULL;
+
+       kthread_stop(es2->apb_log_task);
+       es2->apb_log_task = NULL;
+}
+
+static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct es2_ap_dev *es2 = f->f_inode->i_private;
+       int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
+       char tmp_buf[3];
+
+       sprintf(tmp_buf, "%d\n", enable);
+       return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
+}
+
+static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       int enable;
+       ssize_t retval;
+       struct es2_ap_dev *es2 = f->f_inode->i_private;
+
+       retval = kstrtoint_from_user(buf, count, 10, &enable);
+       if (retval)
+               return retval;
+
+       if (enable)
+               usb_log_enable(es2);
+       else
+               usb_log_disable(es2);
+
+       return count;
+}
+
+static const struct file_operations apb_log_enable_fops = {
+       .read   = apb_log_enable_read,
+       .write  = apb_log_enable_write,
+};
+
+static int apb_get_cport_count(struct usb_device *udev)
+{
+       int retval;
+       __le16 *cport_count;
+
+       cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
+       if (!cport_count)
+               return -ENOMEM;
+
+       retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                                GB_APB_REQUEST_CPORT_COUNT,
+                                USB_DIR_IN | USB_TYPE_VENDOR |
+                                USB_RECIP_INTERFACE, 0, 0, cport_count,
+                                sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
+       if (retval != sizeof(*cport_count)) {
+               dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
+                       retval);
+
+               if (retval >= 0)
+                       retval = -EIO;
+
+               goto out;
+       }
+
+       retval = le16_to_cpu(*cport_count);
+
+       /* We need to fit a CPort ID in one byte of a message header */
+       if (retval > U8_MAX) {
+               retval = U8_MAX;
+               dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
+       }
+
+out:
+       kfree(cport_count);
+       return retval;
+}
+
+/*
+ * The ES2 USB Bridge device has 15 endpoints
+ * 1 Control - usual USB stuff + AP -> APBridgeA messages
+ * 7 Bulk IN - CPort data in
+ * 7 Bulk OUT - CPort data out
+ */
+static int ap_probe(struct usb_interface *interface,
+                   const struct usb_device_id *id)
+{
+       struct es2_ap_dev *es2;
+       struct gb_host_device *hd;
+       struct usb_device *udev;
+       struct usb_host_interface *iface_desc;
+       struct usb_endpoint_descriptor *endpoint;
+       __u8 ep_addr;
+       int retval;
+       int i;
+       int num_cports;
+       bool bulk_out_found = false;
+       bool bulk_in_found = false;
+       bool arpc_in_found = false;
+
+       udev = usb_get_dev(interface_to_usbdev(interface));
+
+       num_cports = apb_get_cport_count(udev);
+       if (num_cports < 0) {
+               usb_put_dev(udev);
+               dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
+                       num_cports);
+               return num_cports;
+       }
+
+       hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
+                               num_cports);
+       if (IS_ERR(hd)) {
+               usb_put_dev(udev);
+               return PTR_ERR(hd);
+       }
+
+       es2 = hd_to_es2(hd);
+       es2->hd = hd;
+       es2->usb_intf = interface;
+       es2->usb_dev = udev;
+       spin_lock_init(&es2->cport_out_urb_lock);
+       INIT_KFIFO(es2->apb_log_fifo);
+       usb_set_intfdata(interface, es2);
+
+       /*
+        * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
+        * dynamically.
+        */
+       retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
+       if (retval)
+               goto error;
+       retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
+       if (retval)
+               goto error;
+
+       /* find all bulk endpoints */
+       iface_desc = interface->cur_altsetting;
+       for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+               endpoint = &iface_desc->endpoint[i].desc;
+               ep_addr = endpoint->bEndpointAddress;
+
+               if (usb_endpoint_is_bulk_in(endpoint)) {
+                       if (!bulk_in_found) {
+                               es2->cport_in.endpoint = ep_addr;
+                               bulk_in_found = true;
+                       } else if (!arpc_in_found) {
+                               es2->arpc_endpoint_in = ep_addr;
+                               arpc_in_found = true;
+                       } else {
+                               dev_warn(&udev->dev,
+                                        "Unused bulk IN endpoint found: 0x%02x\n",
+                                        ep_addr);
+                       }
+                       continue;
+               }
+               if (usb_endpoint_is_bulk_out(endpoint)) {
+                       if (!bulk_out_found) {
+                               es2->cport_out_endpoint = ep_addr;
+                               bulk_out_found = true;
+                       } else {
+                               dev_warn(&udev->dev,
+                                        "Unused bulk OUT endpoint found: 0x%02x\n",
+                                        ep_addr);
+                       }
+                       continue;
+               }
+               dev_warn(&udev->dev,
+                        "Unknown endpoint type found, address 0x%02x\n",
+                        ep_addr);
+       }
+       if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
+               dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
+       /* Allocate buffers for our cport in messages */
+       for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+               struct urb *urb;
+               u8 *buffer;
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       retval = -ENOMEM;
+                       goto error;
+               }
+               es2->cport_in.urb[i] = urb;
+
+               buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
+               if (!buffer) {
+                       retval = -ENOMEM;
+                       goto error;
+               }
+
+               usb_fill_bulk_urb(urb, udev,
+                                 usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
+                                 buffer, ES2_GBUF_MSG_SIZE_MAX,
+                                 cport_in_callback, hd);
+
+               es2->cport_in.buffer[i] = buffer;
+       }
+
+       /* Allocate buffers for ARPC in messages */
+       for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+               struct urb *urb;
+               u8 *buffer;
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       retval = -ENOMEM;
+                       goto error;
+               }
+               es2->arpc_urb[i] = urb;
+
+               buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
+               if (!buffer) {
+                       retval = -ENOMEM;
+                       goto error;
+               }
+
+               usb_fill_bulk_urb(urb, udev,
+                                 usb_rcvbulkpipe(udev,
+                                                 es2->arpc_endpoint_in),
+                                 buffer, ARPC_IN_SIZE_MAX,
+                                 arpc_in_callback, es2);
+
+               es2->arpc_buffer[i] = buffer;
+       }
+
+       /* Allocate urbs for our CPort OUT messages */
+       for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+               struct urb *urb;
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       retval = -ENOMEM;
+                       goto error;
+               }
+
+               es2->cport_out_urb[i] = urb;
+               es2->cport_out_urb_busy[i] = false;     /* just to be safe */
+       }
+
+       /* XXX We will need to rename this per APB */
+       es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
+                                                       (S_IWUSR | S_IRUGO),
+                                                       gb_debugfs_get(), es2,
+                                                       &apb_log_enable_fops);
+
+       INIT_LIST_HEAD(&es2->arpcs);
+       spin_lock_init(&es2->arpc_lock);
+
+       retval = es2_arpc_in_enable(es2);
+       if (retval)
+               goto error;
+
+       retval = gb_hd_add(hd);
+       if (retval)
+               goto err_disable_arpc_in;
+
+       retval = es2_cport_in_enable(es2, &es2->cport_in);
+       if (retval)
+               goto err_hd_del;
+
+       return 0;
+
+err_hd_del:
+       gb_hd_del(hd);
+err_disable_arpc_in:
+       es2_arpc_in_disable(es2);
+error:
+       es2_destroy(es2);
+
+       return retval;
+}
+
+static void ap_disconnect(struct usb_interface *interface)
+{
+       struct es2_ap_dev *es2 = usb_get_intfdata(interface);
+
+       gb_hd_del(es2->hd);
+
+       es2_cport_in_disable(es2, &es2->cport_in);
+       es2_arpc_in_disable(es2);
+
+       es2_destroy(es2);
+}
+
+static struct usb_driver es2_ap_driver = {
+       .name =         "es2_ap_driver",
+       .probe =        ap_probe,
+       .disconnect =   ap_disconnect,
+       .id_table =     id_table,
+       .soft_unbind =  1,
+};
+
+module_usb_driver(es2_ap_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
new file mode 100644 (file)
index 0000000..f4f0db1
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Greybus Firmware Management Header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __FIRMWARE_H
+#define __FIRMWARE_H
+
+#include "greybus.h"
+
+#define FW_NAME_PREFIX "gmp_"
+
+/*
+ * Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
+ *                                  (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
+ */
+#define FW_NAME_SIZE           56
+
+/* Firmware Management Protocol specific functions */
+int fw_mgmt_init(void);
+void fw_mgmt_exit(void);
+struct gb_connection *to_fw_mgmt_connection(struct device *dev);
+int gb_fw_mgmt_request_handler(struct gb_operation *op);
+int gb_fw_mgmt_connection_init(struct gb_connection *connection);
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection);
+
+/* Firmware Download Protocol specific functions */
+int gb_fw_download_request_handler(struct gb_operation *op);
+int gb_fw_download_connection_init(struct gb_connection *connection);
+void gb_fw_download_connection_exit(struct gb_connection *connection);
+
+/* CAP Protocol specific functions */
+int cap_init(void);
+void cap_exit(void);
+int gb_cap_connection_init(struct gb_connection *connection);
+void gb_cap_connection_exit(struct gb_connection *connection);
+
+#endif /* __FIRMWARE_H */
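To make the FW_NAME_SIZE arithmetic above concrete, here is a small sketch with invented interface IDs and an invented "s3f" tag; it only restates the format documented in the comment (4 for the prefix, 4 * 9 for the hex fields, 10 for the tag, 5 for ".tftf" and 1 for the terminating NUL add up to 56):

    char name[FW_NAME_SIZE];

    /* All numeric IDs and the "s3f" tag below are made up for illustration. */
    snprintf(name, sizeof(name), FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
    	 0x00000126, 0x00001000, 0xfffe0001, 0x00000015, "s3f");
    /* name == "gmp_00000126_00001000_fffe0001_00000015_s3f.tftf" (48 chars) */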
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
new file mode 100644 (file)
index 0000000..a7e4a8c
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * Greybus Firmware Core Bundle Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware.h>
+#include "firmware.h"
+#include "greybus.h"
+#include "spilib.h"
+
+struct gb_fw_core {
+       struct gb_connection    *download_connection;
+       struct gb_connection    *mgmt_connection;
+       struct gb_connection    *spi_connection;
+       struct gb_connection    *cap_connection;
+};
+
+#ifndef SPI_CORE_SUPPORT_PM
+static int fw_spi_prepare_transfer_hardware(struct device *dev)
+{
+       return gb_pm_runtime_get_sync(to_gb_bundle(dev));
+}
+
+static void fw_spi_unprepare_transfer_hardware(struct device *dev)
+{
+       gb_pm_runtime_put_autosuspend(to_gb_bundle(dev));
+}
+
+static struct spilib_ops __spilib_ops = {
+       .prepare_transfer_hardware = fw_spi_prepare_transfer_hardware,
+       .unprepare_transfer_hardware = fw_spi_unprepare_transfer_hardware,
+};
+
+static struct spilib_ops *spilib_ops = &__spilib_ops;
+#else
+static struct spilib_ops *spilib_ops = NULL;
+#endif
+
+struct gb_connection *to_fw_mgmt_connection(struct device *dev)
+{
+       struct gb_fw_core *fw_core = dev_get_drvdata(dev);
+
+       return fw_core->mgmt_connection;
+}
+
+static int gb_fw_spi_connection_init(struct gb_connection *connection)
+{
+       int ret;
+
+       if (!connection)
+               return 0;
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               return ret;
+
+       ret = gb_spilib_master_init(connection, &connection->bundle->dev,
+                                   spilib_ops);
+       if (ret) {
+               gb_connection_disable(connection);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void gb_fw_spi_connection_exit(struct gb_connection *connection)
+{
+       if (!connection)
+               return;
+
+       gb_spilib_master_exit(connection);
+       gb_connection_disable(connection);
+}
+
+static int gb_fw_core_probe(struct gb_bundle *bundle,
+                           const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_fw_core *fw_core;
+       int ret, i;
+       u16 cport_id;
+       u8 protocol_id;
+
+       fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
+       if (!fw_core)
+               return -ENOMEM;
+
+       /* Parse CPorts and create connections */
+       for (i = 0; i < bundle->num_cports; i++) {
+               cport_desc = &bundle->cport_desc[i];
+               cport_id = le16_to_cpu(cport_desc->id);
+               protocol_id = cport_desc->protocol_id;
+
+               switch (protocol_id) {
+               case GREYBUS_PROTOCOL_FW_MANAGEMENT:
+                       /* Disallow multiple Firmware Management CPorts */
+                       if (fw_core->mgmt_connection) {
+                               dev_err(&bundle->dev,
+                                       "multiple management CPorts found\n");
+                               ret = -EINVAL;
+                               goto err_destroy_connections;
+                       }
+
+                       connection = gb_connection_create(bundle, cport_id,
+                                               gb_fw_mgmt_request_handler);
+                       if (IS_ERR(connection)) {
+                               ret = PTR_ERR(connection);
+                               dev_err(&bundle->dev,
+                                       "failed to create management connection (%d)\n",
+                                       ret);
+                               goto err_destroy_connections;
+                       }
+
+                       fw_core->mgmt_connection = connection;
+                       break;
+               case GREYBUS_PROTOCOL_FW_DOWNLOAD:
+                       /* Disallow multiple Firmware Download CPorts */
+                       if (fw_core->download_connection) {
+                               dev_err(&bundle->dev,
+                                       "multiple download CPorts found\n");
+                               ret = -EINVAL;
+                               goto err_destroy_connections;
+                       }
+
+                       connection = gb_connection_create(bundle, cport_id,
+                                               gb_fw_download_request_handler);
+                       if (IS_ERR(connection)) {
+                               dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
+                                       PTR_ERR(connection));
+                       } else {
+                               fw_core->download_connection = connection;
+                       }
+
+                       break;
+               case GREYBUS_PROTOCOL_SPI:
+                       /* Disallow multiple SPI CPorts */
+                       if (fw_core->spi_connection) {
+                               dev_err(&bundle->dev,
+                                       "multiple SPI CPorts found\n");
+                               ret = -EINVAL;
+                               goto err_destroy_connections;
+                       }
+
+                       connection = gb_connection_create(bundle, cport_id,
+                                                         NULL);
+                       if (IS_ERR(connection)) {
+                               dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
+                                       PTR_ERR(connection));
+                       } else {
+                               fw_core->spi_connection = connection;
+                       }
+
+                       break;
+               case GREYBUS_PROTOCOL_AUTHENTICATION:
+                       /* Disallow multiple CAP CPorts */
+                       if (fw_core->cap_connection) {
+                               dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
+                               ret = -EINVAL;
+                               goto err_destroy_connections;
+                       }
+
+                       connection = gb_connection_create(bundle, cport_id,
+                                                         NULL);
+                       if (IS_ERR(connection)) {
+                               dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
+                                       PTR_ERR(connection));
+                       } else {
+                               fw_core->cap_connection = connection;
+                       }
+
+                       break;
+               default:
+                       dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
+                               protocol_id);
+                       ret = -EINVAL;
+                       goto err_destroy_connections;
+               }
+       }
+
+       /* Firmware Management connection is mandatory */
+       if (!fw_core->mgmt_connection) {
+               dev_err(&bundle->dev, "missing management connection\n");
+               ret = -ENODEV;
+               goto err_destroy_connections;
+       }
+
+       ret = gb_fw_download_connection_init(fw_core->download_connection);
+       if (ret) {
+               /* We may still be able to work with the Interface */
+               dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
+                       ret);
+               gb_connection_destroy(fw_core->download_connection);
+               fw_core->download_connection = NULL;
+       }
+
+       ret = gb_fw_spi_connection_init(fw_core->spi_connection);
+       if (ret) {
+               /* We may still be able to work with the Interface */
+               dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
+                       ret);
+               gb_connection_destroy(fw_core->spi_connection);
+               fw_core->spi_connection = NULL;
+       }
+
+       ret = gb_cap_connection_init(fw_core->cap_connection);
+       if (ret) {
+               /* We may still be able to work with the Interface */
+               dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
+                       ret);
+               gb_connection_destroy(fw_core->cap_connection);
+               fw_core->cap_connection = NULL;
+       }
+
+       ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
+       if (ret) {
+               /* We may still be able to work with the Interface */
+               dev_err(&bundle->dev, "failed to initialize firmware management connection, disable it (%d)\n",
+                       ret);
+               goto err_exit_connections;
+       }
+
+       greybus_set_drvdata(bundle, fw_core);
+
+       /* FIXME: Remove this after S2 Loader gets runtime PM support */
+       if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
+               gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+err_exit_connections:
+       gb_cap_connection_exit(fw_core->cap_connection);
+       gb_fw_spi_connection_exit(fw_core->spi_connection);
+       gb_fw_download_connection_exit(fw_core->download_connection);
+err_destroy_connections:
+       gb_connection_destroy(fw_core->mgmt_connection);
+       gb_connection_destroy(fw_core->cap_connection);
+       gb_connection_destroy(fw_core->spi_connection);
+       gb_connection_destroy(fw_core->download_connection);
+       kfree(fw_core);
+
+       return ret;
+}
+
+static void gb_fw_core_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
+       int ret;
+
+       /* FIXME: Remove this after S2 Loader gets runtime PM support */
+       if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
+               ret = gb_pm_runtime_get_sync(bundle);
+               if (ret)
+                       gb_pm_runtime_get_noresume(bundle);
+       }
+
+       gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
+       gb_cap_connection_exit(fw_core->cap_connection);
+       gb_fw_spi_connection_exit(fw_core->spi_connection);
+       gb_fw_download_connection_exit(fw_core->download_connection);
+
+       gb_connection_destroy(fw_core->mgmt_connection);
+       gb_connection_destroy(fw_core->cap_connection);
+       gb_connection_destroy(fw_core->spi_connection);
+       gb_connection_destroy(fw_core->download_connection);
+
+       kfree(fw_core);
+}
+
+static const struct greybus_bundle_id gb_fw_core_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
+       { }
+};
+
+static struct greybus_driver gb_fw_core_driver = {
+       .name           = "gb-firmware",
+       .probe          = gb_fw_core_probe,
+       .disconnect     = gb_fw_core_disconnect,
+       .id_table       = gb_fw_core_id_table,
+};
+
+static int fw_core_init(void)
+{
+       int ret;
+
+       ret = fw_mgmt_init();
+       if (ret) {
+               pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
+               return ret;
+       }
+
+       ret = cap_init();
+       if (ret) {
+               pr_err("Failed to initialize component authentication core (%d)\n",
+                      ret);
+               goto fw_mgmt_exit;
+       }
+
+       ret = greybus_register(&gb_fw_core_driver);
+       if (ret)
+               goto cap_exit;
+
+       return 0;
+
+cap_exit:
+       cap_exit();
+fw_mgmt_exit:
+       fw_mgmt_exit();
+
+       return ret;
+}
+module_init(fw_core_init);
+
+static void __exit fw_core_exit(void)
+{
+       greybus_deregister(&gb_fw_core_driver);
+       cap_exit();
+       fw_mgmt_exit();
+}
+module_exit(fw_core_exit);
+
+MODULE_ALIAS("greybus:firmware");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
new file mode 100644 (file)
index 0000000..2d72468
--- /dev/null
@@ -0,0 +1,465 @@
+/*
+ * Greybus Firmware Download Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "firmware.h"
+#include "greybus.h"
+
+/* Estimated minimum size of a single fetch request; actual fetches may be smaller */
+#define MIN_FETCH_SIZE         512
+/* Timeout, in jiffies, within which fetch or release firmware must be called */
+#define NEXT_REQ_TIMEOUT_J     msecs_to_jiffies(1000)
+
+struct fw_request {
+       u8                      firmware_id;
+       bool                    disabled;
+       bool                    timedout;
+       char                    name[FW_NAME_SIZE];
+       const struct firmware   *fw;
+       struct list_head        node;
+
+       struct delayed_work     dwork;
+       /* Timeout, in jiffies, within which the firmware shall download */
+       unsigned long           release_timeout_j;
+       struct kref             kref;
+       struct fw_download      *fw_download;
+};
+
+struct fw_download {
+       struct device           *parent;
+       struct gb_connection    *connection;
+       struct list_head        fw_requests;
+       struct ida              id_map;
+       struct mutex            mutex;
+};
+
+static void fw_req_release(struct kref *kref)
+{
+       struct fw_request *fw_req = container_of(kref, struct fw_request, kref);
+
+       dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
+               fw_req->name);
+
+       release_firmware(fw_req->fw);
+
+       /*
+        * If the request timed out, the module may still send a fetch-fw or
+        * release-fw request for it later. Keep the id we allocated for this
+        * request blocked, so that the AP doesn't confuse a later fw-request
+        * (with the same firmware_id) with the old timed-out one.
+        *
+        * NOTE:
+        *
+        * This also means that after 255 timeouts we will fail to service new
+        * firmware downloads. But what else can we do in that case anyway? Let's
+        * just hope that it never happens.
+        */
+       if (!fw_req->timedout)
+               ida_simple_remove(&fw_req->fw_download->id_map,
+                                 fw_req->firmware_id);
+
+       kfree(fw_req);
+}
+
+/*
+ * Incoming requests are serialized for a connection, so the only possible race
+ * is between the timeout handler freeing a fw_request and an incoming request.
+ *
+ * The operations on the fw-request list are protected by the mutex and
+ * get_fw_req() increments the reference count before returning a fw_req pointer
+ * to the users.
+ *
+ * free_firmware() also takes the mutex while removing an entry from the list,
+ * which guarantees that every existing user of fw_req already holds a
+ * kref reference and that no new users can appear.
+ *
+ * Once the last user drops the reference, the fw_req structure is freed.
+ */
+static void put_fw_req(struct fw_request *fw_req)
+{
+       kref_put(&fw_req->kref, fw_req_release);
+}
+
+/* Caller must call put_fw_req() after using struct fw_request */
+static struct fw_request *get_fw_req(struct fw_download *fw_download,
+                                    u8 firmware_id)
+{
+       struct fw_request *fw_req;
+
+       mutex_lock(&fw_download->mutex);
+
+       list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
+               if (fw_req->firmware_id == firmware_id) {
+                       kref_get(&fw_req->kref);
+                       goto unlock;
+               }
+       }
+
+       fw_req = NULL;
+
+unlock:
+       mutex_unlock(&fw_download->mutex);
+
+       return fw_req;
+}
+
+static void free_firmware(struct fw_download *fw_download,
+                         struct fw_request *fw_req)
+{
+       /* Already disabled from timeout handlers */
+       if (fw_req->disabled)
+               return;
+
+       mutex_lock(&fw_download->mutex);
+       list_del(&fw_req->node);
+       mutex_unlock(&fw_download->mutex);
+
+       fw_req->disabled = true;
+       put_fw_req(fw_req);
+}
+
+static void fw_request_timedout(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct fw_request *fw_req = container_of(dwork, struct fw_request, dwork);
+       struct fw_download *fw_download = fw_req->fw_download;
+
+       dev_err(fw_download->parent,
+               "Timed out waiting for fetch / release firmware requests: %u\n",
+               fw_req->firmware_id);
+
+       fw_req->timedout = true;
+       free_firmware(fw_download, fw_req);
+}
+
+static int exceeds_release_timeout(struct fw_request *fw_req)
+{
+       struct fw_download *fw_download = fw_req->fw_download;
+
+       if (time_before(jiffies, fw_req->release_timeout_j))
+               return 0;
+
+       dev_err(fw_download->parent,
+               "Firmware download didn't finish in time, abort: %d\n",
+               fw_req->firmware_id);
+
+       fw_req->timedout = true;
+       free_firmware(fw_download, fw_req);
+
+       return -ETIMEDOUT;
+}
+
+/* Load the firmware blob from disk and allocate a fw_request for it */
+static struct fw_request *find_firmware(struct fw_download *fw_download,
+                                       const char *tag)
+{
+       struct gb_interface *intf = fw_download->connection->bundle->intf;
+       struct fw_request *fw_req;
+       int ret, req_count;
+
+       fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
+       if (!fw_req)
+               return ERR_PTR(-ENOMEM);
+
+       /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+       ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(fw_download->parent,
+                       "failed to allocate firmware id (%d)\n", ret);
+               goto err_free_req;
+       }
+       fw_req->firmware_id = ret;
+
+       snprintf(fw_req->name, sizeof(fw_req->name),
+                FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
+                intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+                intf->vendor_id, intf->product_id, tag);
+
+       dev_info(fw_download->parent, "Requested firmware package '%s'\n",
+                fw_req->name);
+
+       ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
+       if (ret) {
+               dev_err(fw_download->parent,
+                       "firmware request failed for %s (%d)\n", fw_req->name,
+                       ret);
+               goto err_free_id;
+       }
+
+       fw_req->fw_download = fw_download;
+       kref_init(&fw_req->kref);
+
+       mutex_lock(&fw_download->mutex);
+       list_add(&fw_req->node, &fw_download->fw_requests);
+       mutex_unlock(&fw_download->mutex);
+
+       /* Timeout, in jiffies, within which firmware should get loaded */
+       req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
+       fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;
+
+       INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
+       schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+       return fw_req;
+
+err_free_id:
+       ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
+err_free_req:
+       kfree(fw_req);
+
+       return ERR_PTR(ret);
+}
+
+static int fw_download_find_firmware(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct fw_download *fw_download = gb_connection_get_data(connection);
+       struct gb_fw_download_find_firmware_request *request;
+       struct gb_fw_download_find_firmware_response *response;
+       struct fw_request *fw_req;
+       const char *tag;
+
+       if (op->request->payload_size != sizeof(*request)) {
+               dev_err(fw_download->parent,
+                       "illegal size of find firmware request (%zu != %zu)\n",
+                       op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+       tag = (const char *)request->firmware_tag;
+
+       /* firmware_tag must be null-terminated */
+       if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) == GB_FIRMWARE_TAG_MAX_SIZE) {
+               dev_err(fw_download->parent,
+                       "firmware-tag is not null-terminated\n");
+               return -EINVAL;
+       }
+
+       fw_req = find_firmware(fw_download, tag);
+       if (IS_ERR(fw_req))
+               return PTR_ERR(fw_req);
+
+       if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
+               dev_err(fw_download->parent, "error allocating response\n");
+               free_firmware(fw_download, fw_req);
+               return -ENOMEM;
+       }
+
+       response = op->response->payload;
+       response->firmware_id = fw_req->firmware_id;
+       response->size = cpu_to_le32(fw_req->fw->size);
+
+       dev_dbg(fw_download->parent,
+               "firmware size is %zu bytes\n", fw_req->fw->size);
+
+       return 0;
+}
+
+static int fw_download_fetch_firmware(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct fw_download *fw_download = gb_connection_get_data(connection);
+       struct gb_fw_download_fetch_firmware_request *request;
+       struct gb_fw_download_fetch_firmware_response *response;
+       struct fw_request *fw_req;
+       const struct firmware *fw;
+       unsigned int offset, size;
+       u8 firmware_id;
+       int ret = 0;
+
+       if (op->request->payload_size != sizeof(*request)) {
+               dev_err(fw_download->parent,
+                       "Illegal size of fetch firmware request (%zu %zu)\n",
+                       op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+       offset = le32_to_cpu(request->offset);
+       size = le32_to_cpu(request->size);
+       firmware_id = request->firmware_id;
+
+       fw_req = get_fw_req(fw_download, firmware_id);
+       if (!fw_req) {
+               dev_err(fw_download->parent,
+                       "firmware not available for id: %02u\n", firmware_id);
+               return -EINVAL;
+       }
+
+       /* Make sure work handler isn't running in parallel */
+       cancel_delayed_work_sync(&fw_req->dwork);
+
+       /* Did we time out before reaching here? */
+       if (fw_req->disabled) {
+               ret = -ETIMEDOUT;
+               goto put_fw;
+       }
+
+       /*
+        * Firmware download must finish within a limited time interval. If it
+        * doesn't, then we might have a buggy Module on the other side. Abort
+        * download.
+        */
+       ret = exceeds_release_timeout(fw_req);
+       if (ret)
+               goto put_fw;
+
+       fw = fw_req->fw;
+
+       if (offset >= fw->size || size > fw->size - offset) {
+               dev_err(fw_download->parent,
+                       "bad fetch firmware request (offs = %u, size = %u)\n",
+                       offset, size);
+               ret = -EINVAL;
+               goto put_fw;
+       }
+
+       if (!gb_operation_response_alloc(op, sizeof(*response) + size,
+                                        GFP_KERNEL)) {
+               dev_err(fw_download->parent,
+                       "error allocating fetch firmware response\n");
+               ret = -ENOMEM;
+               goto put_fw;
+       }
+
+       response = op->response->payload;
+       memcpy(response->data, fw->data + offset, size);
+
+       dev_dbg(fw_download->parent,
+               "responding with firmware (offs = %u, size = %u)\n", offset,
+               size);
+
+       /* Refresh timeout */
+       schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+put_fw:
+       put_fw_req(fw_req);
+
+       return ret;
+}
+
+static int fw_download_release_firmware(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct fw_download *fw_download = gb_connection_get_data(connection);
+       struct gb_fw_download_release_firmware_request *request;
+       struct fw_request *fw_req;
+       u8 firmware_id;
+
+       if (op->request->payload_size != sizeof(*request)) {
+               dev_err(fw_download->parent,
+                       "Illegal size of release firmware request (%zu %zu)\n",
+                       op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+       firmware_id = request->firmware_id;
+
+       fw_req = get_fw_req(fw_download, firmware_id);
+       if (!fw_req) {
+               dev_err(fw_download->parent,
+                       "firmware not available for id: %02u\n", firmware_id);
+               return -EINVAL;
+       }
+
+       cancel_delayed_work_sync(&fw_req->dwork);
+
+       free_firmware(fw_download, fw_req);
+       put_fw_req(fw_req);
+
+       dev_dbg(fw_download->parent, "release firmware\n");
+
+       return 0;
+}
+
+int gb_fw_download_request_handler(struct gb_operation *op)
+{
+       u8 type = op->type;
+
+       switch (type) {
+       case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
+               return fw_download_find_firmware(op);
+       case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
+               return fw_download_fetch_firmware(op);
+       case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
+               return fw_download_release_firmware(op);
+       default:
+               dev_err(&op->connection->bundle->dev,
+                       "unsupported request: %u\n", type);
+               return -EINVAL;
+       }
+}
+
+int gb_fw_download_connection_init(struct gb_connection *connection)
+{
+       struct fw_download *fw_download;
+       int ret;
+
+       if (!connection)
+               return 0;
+
+       fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
+       if (!fw_download)
+               return -ENOMEM;
+
+       fw_download->parent = &connection->bundle->dev;
+       INIT_LIST_HEAD(&fw_download->fw_requests);
+       ida_init(&fw_download->id_map);
+       gb_connection_set_data(connection, fw_download);
+       fw_download->connection = connection;
+       mutex_init(&fw_download->mutex);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto err_destroy_id_map;
+
+       return 0;
+
+err_destroy_id_map:
+       ida_destroy(&fw_download->id_map);
+       kfree(fw_download);
+
+       return ret;
+}
+
+void gb_fw_download_connection_exit(struct gb_connection *connection)
+{
+       struct fw_download *fw_download;
+       struct fw_request *fw_req, *tmp;
+
+       if (!connection)
+               return;
+
+       fw_download = gb_connection_get_data(connection);
+       gb_connection_disable(fw_download->connection);
+
+       /*
+        * Make sure we have a reference to the pending requests, before they
+        * are freed from the timeout handler.
+        */
+       mutex_lock(&fw_download->mutex);
+       list_for_each_entry(fw_req, &fw_download->fw_requests, node)
+               kref_get(&fw_req->kref);
+       mutex_unlock(&fw_download->mutex);
+
+       /* Release pending firmware packages */
+       list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
+               cancel_delayed_work_sync(&fw_req->dwork);
+               free_firmware(fw_download, fw_req);
+               put_fw_req(fw_req);
+       }
+
+       ida_destroy(&fw_download->id_map);
+       kfree(fw_download);
+}
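Two quick notes on the mechanisms in this file. First, the release timeout set in find_firmware() scales with the image: a 100 KiB blob, for example, gives DIV_ROUND_UP(102400, 512) = 200 fetch requests and therefore a 200 * NEXT_REQ_TIMEOUT_J (about 200 s) budget. Second, the lookup/lifetime scheme described above get_fw_req() is the usual mutex-plus-kref idiom; the sketch below restates it generically, with illustrative names that are not part of the driver:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct example_item {
    	int id;
    	struct kref kref;
    	struct list_head node;
    };

    /* Look up an item by id; the caller must later drop the kref it receives. */
    static struct example_item *example_item_get(struct list_head *list,
    					     struct mutex *lock, int id)
    {
    	struct example_item *item;

    	mutex_lock(lock);
    	list_for_each_entry(item, list, node) {
    		if (item->id == id) {
    			kref_get(&item->kref);	/* caller now holds a reference */
    			mutex_unlock(lock);
    			return item;
    		}
    	}
    	mutex_unlock(lock);

    	return NULL;	/* not found */
    }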
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
new file mode 100644 (file)
index 0000000..3cd6cf0
--- /dev/null
@@ -0,0 +1,721 @@
+/*
+ * Greybus Firmware Management Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "firmware.h"
+#include "greybus_firmware.h"
+#include "greybus.h"
+
+#define FW_MGMT_TIMEOUT_MS             1000
+
+struct fw_mgmt {
+       struct device           *parent;
+       struct gb_connection    *connection;
+       struct kref             kref;
+       struct list_head        node;
+
+       /* Common id-map for interface and backend firmware requests */
+       struct ida              id_map;
+       struct mutex            mutex;
+       struct completion       completion;
+       struct cdev             cdev;
+       struct device           *class_device;
+       dev_t                   dev_num;
+       unsigned int            timeout_jiffies;
+       bool                    disabled; /* connection getting disabled */
+
+       /* Interface Firmware specific fields */
+       bool                    mode_switch_started;
+       bool                    intf_fw_loaded;
+       u8                      intf_fw_request_id;
+       u8                      intf_fw_status;
+       u16                     intf_fw_major;
+       u16                     intf_fw_minor;
+
+       /* Backend Firmware specific fields */
+       u8                      backend_fw_request_id;
+       u8                      backend_fw_status;
+};
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one minor is required per Interface.
+ */
+#define NUM_MINORS             U8_MAX
+
+static struct class *fw_mgmt_class;
+static dev_t fw_mgmt_dev_num;
+static DEFINE_IDA(fw_mgmt_minors_map);
+static LIST_HEAD(fw_mgmt_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void fw_mgmt_kref_release(struct kref *kref)
+{
+       struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);
+
+       ida_destroy(&fw_mgmt->id_map);
+       kfree(fw_mgmt);
+}
+
+/*
+ * All users of fw_mgmt take a reference (while holding list_mutex) before they
+ * get a pointer to it. The structure is freed only after the last user has
+ * dropped its reference.
+ */
+static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
+{
+       kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
+}
+
+/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
+static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
+{
+       struct fw_mgmt *fw_mgmt;
+
+       mutex_lock(&list_mutex);
+
+       list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
+               if (&fw_mgmt->cdev == cdev) {
+                       kref_get(&fw_mgmt->kref);
+                       goto unlock;
+               }
+       }
+
+       fw_mgmt = NULL;
+
+unlock:
+       mutex_unlock(&list_mutex);
+
+       return fw_mgmt;
+}
+
+static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
+               struct fw_mgmt_ioc_get_intf_version *fw_info)
+{
+       struct gb_connection *connection = fw_mgmt->connection;
+       struct gb_fw_mgmt_interface_fw_version_response response;
+       int ret;
+
+       ret = gb_operation_sync(connection,
+                               GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(fw_mgmt->parent,
+                       "failed to get interface firmware version (%d)\n", ret);
+               return ret;
+       }
+
+       fw_info->major = le16_to_cpu(response.major);
+       fw_info->minor = le16_to_cpu(response.minor);
+
+       strncpy(fw_info->firmware_tag, response.firmware_tag,
+               GB_FIRMWARE_TAG_MAX_SIZE);
+
+       /*
+        * The firmware-tag should be null-terminated; otherwise log an error but
+        * don't fail.
+        */
+       if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+               dev_err(fw_mgmt->parent,
+                       "fw-version: firmware-tag is not NULL terminated\n");
+               fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
+       }
+
+       return 0;
+}
+
+static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
+                                              u8 load_method, const char *tag)
+{
+       struct gb_fw_mgmt_load_and_validate_fw_request request;
+       int ret;
+
+       if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
+           load_method != GB_FW_LOAD_METHOD_INTERNAL) {
+               dev_err(fw_mgmt->parent,
+                       "invalid load-method (%d)\n", load_method);
+               return -EINVAL;
+       }
+
+       request.load_method = load_method;
+       strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
+
+       /*
+        * The firmware-tag must be null-terminated; otherwise log an error and
+        * fail.
+        */
+       if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+               dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
+               return -EINVAL;
+       }
+
+       /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+       ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+                       ret);
+               return ret;
+       }
+
+       fw_mgmt->intf_fw_request_id = ret;
+       fw_mgmt->intf_fw_loaded = false;
+       request.request_id = ret;
+
+       ret = gb_operation_sync(fw_mgmt->connection,
+                               GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
+                               sizeof(request), NULL, 0);
+       if (ret) {
+               ida_simple_remove(&fw_mgmt->id_map,
+                                 fw_mgmt->intf_fw_request_id);
+               fw_mgmt->intf_fw_request_id = 0;
+               dev_err(fw_mgmt->parent,
+                       "load and validate firmware request failed (%d)\n",
+                       ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+       struct gb_fw_mgmt_loaded_fw_request *request;
+
+       /* No pending load-and-validate request? */
+       if (!fw_mgmt->intf_fw_request_id) {
+               dev_err(fw_mgmt->parent,
+                       "unexpected firmware loaded request received\n");
+               return -ENODEV;
+       }
+
+       if (op->request->payload_size != sizeof(*request)) {
+               dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
+                       op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       /* Invalid request id? */
+       if (request->request_id != fw_mgmt->intf_fw_request_id) {
+               dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
+                       fw_mgmt->intf_fw_request_id, request->request_id);
+               return -ENODEV;
+       }
+
+       ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+       fw_mgmt->intf_fw_request_id = 0;
+       fw_mgmt->intf_fw_status = request->status;
+       fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
+       fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);
+
+       if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
+               dev_err(fw_mgmt->parent,
+                       "failed to load interface firmware, status:%02x\n",
+                       fw_mgmt->intf_fw_status);
+       else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
+               dev_err(fw_mgmt->parent,
+                       "failed to validate interface firmware, status:%02x\n",
+                       fw_mgmt->intf_fw_status);
+       else
+               fw_mgmt->intf_fw_loaded = true;
+
+       complete(&fw_mgmt->completion);
+
+       return 0;
+}
+
+static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
+               struct fw_mgmt_ioc_get_backend_version *fw_info)
+{
+       struct gb_connection *connection = fw_mgmt->connection;
+       struct gb_fw_mgmt_backend_fw_version_request request;
+       struct gb_fw_mgmt_backend_fw_version_response response;
+       int ret;
+
+       strncpy(request.firmware_tag, fw_info->firmware_tag,
+               GB_FIRMWARE_TAG_MAX_SIZE);
+
+       /*
+        * The firmware-tag must be null-terminated; otherwise log an error and
+        * fail.
+        */
+       if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+               dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
+               return -EINVAL;
+       }
+
+       ret = gb_operation_sync(connection,
+                               GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
+                       fw_info->firmware_tag, ret);
+               return ret;
+       }
+
+       fw_info->status = response.status;
+
+       /* Reset version as that should be non-zero only for success case */
+       fw_info->major = 0;
+       fw_info->minor = 0;
+
+       switch (fw_info->status) {
+       case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
+               fw_info->major = le16_to_cpu(response.major);
+               fw_info->minor = le16_to_cpu(response.minor);
+               break;
+       case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
+       case GB_FW_BACKEND_VERSION_STATUS_RETRY:
+               break;
+       case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
+               dev_err(fw_mgmt->parent,
+                       "Firmware with tag %s is not supported by Interface\n",
+                       fw_info->firmware_tag);
+               break;
+       default:
+               dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
+                       fw_info->status);
+       }
+
+       return 0;
+}
+
+static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
+                                              char *tag)
+{
+       struct gb_fw_mgmt_backend_fw_update_request request;
+       int ret;
+
+       strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
+
+       /*
+        * The firmware-tag must be null-terminated; otherwise log an error and
+        * fail.
+        */
+       if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+               dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
+               return -EINVAL;
+       }
+
+       /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+       ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+                       ret);
+               return ret;
+       }
+
+       fw_mgmt->backend_fw_request_id = ret;
+       request.request_id = ret;
+
+       ret = gb_operation_sync(fw_mgmt->connection,
+                               GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
+                               sizeof(request), NULL, 0);
+       if (ret) {
+               ida_simple_remove(&fw_mgmt->id_map,
+                                 fw_mgmt->backend_fw_request_id);
+               fw_mgmt->backend_fw_request_id = 0;
+               dev_err(fw_mgmt->parent,
+                       "backend %s firmware update request failed (%d)\n", tag,
+                       ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+       struct gb_fw_mgmt_backend_fw_updated_request *request;
+
+       /* No pending backend firmware update request? */
+       if (!fw_mgmt->backend_fw_request_id) {
+               dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
+               return -ENODEV;
+       }
+
+       if (op->request->payload_size != sizeof(*request)) {
+               dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
+                       op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       /* Invalid request id? */
+       if (request->request_id != fw_mgmt->backend_fw_request_id) {
+               dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
+                       fw_mgmt->backend_fw_request_id, request->request_id);
+               return -ENODEV;
+       }
+
+       ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+       fw_mgmt->backend_fw_request_id = 0;
+       fw_mgmt->backend_fw_status = request->status;
+
+       if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
+           (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
+               dev_err(fw_mgmt->parent,
+                       "failed to load backend firmware: %02x\n",
+                       fw_mgmt->backend_fw_status);
+
+       complete(&fw_mgmt->completion);
+
+       return 0;
+}
+
+/* Char device fops */
+
+static int fw_mgmt_open(struct inode *inode, struct file *file)
+{
+       struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);
+
+       /* fw_mgmt structure can't get freed until file descriptor is closed */
+       if (fw_mgmt) {
+               file->private_data = fw_mgmt;
+               return 0;
+       }
+
+       return -ENODEV;
+}
+
+static int fw_mgmt_release(struct inode *inode, struct file *file)
+{
+       struct fw_mgmt *fw_mgmt = file->private_data;
+
+       put_fw_mgmt(fw_mgmt);
+       return 0;
+}
+
+static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
+                        void __user *buf)
+{
+       struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+       struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+       struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+       struct fw_mgmt_ioc_backend_fw_update backend_update;
+       unsigned int timeout;
+       int ret;
+
+       /* Reject any operations after mode-switch has started */
+       if (fw_mgmt->mode_switch_started)
+               return -EBUSY;
+
+       switch (cmd) {
+       case FW_MGMT_IOC_GET_INTF_FW:
+               ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
+                                                            &intf_fw_info);
+               if (ret)
+                       return ret;
+
+               if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
+                       return -EFAULT;
+
+               return 0;
+       case FW_MGMT_IOC_GET_BACKEND_FW:
+               if (copy_from_user(&backend_fw_info, buf,
+                                  sizeof(backend_fw_info)))
+                       return -EFAULT;
+
+               ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
+                                                          &backend_fw_info);
+               if (ret)
+                       return ret;
+
+               if (copy_to_user(buf, &backend_fw_info,
+                                sizeof(backend_fw_info)))
+                       return -EFAULT;
+
+               return 0;
+       case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+               if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
+                       return -EFAULT;
+
+               ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
+                               intf_load.load_method, intf_load.firmware_tag);
+               if (ret)
+                       return ret;
+
+               if (!wait_for_completion_timeout(&fw_mgmt->completion,
+                                                fw_mgmt->timeout_jiffies)) {
+                       dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
+                       return -ETIMEDOUT;
+               }
+
+               intf_load.status = fw_mgmt->intf_fw_status;
+               intf_load.major = fw_mgmt->intf_fw_major;
+               intf_load.minor = fw_mgmt->intf_fw_minor;
+
+               if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
+                       return -EFAULT;
+
+               return 0;
+       case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+               if (copy_from_user(&backend_update, buf,
+                                  sizeof(backend_update)))
+                       return -EFAULT;
+
+               ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
+                               backend_update.firmware_tag);
+               if (ret)
+                       return ret;
+
+               if (!wait_for_completion_timeout(&fw_mgmt->completion,
+                                                fw_mgmt->timeout_jiffies)) {
+                       dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
+                       return -ETIMEDOUT;
+               }
+
+               backend_update.status = fw_mgmt->backend_fw_status;
+
+               if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
+                       return -EFAULT;
+
+               return 0;
+       case FW_MGMT_IOC_SET_TIMEOUT_MS:
+               if (get_user(timeout, (unsigned int __user *)buf))
+                       return -EFAULT;
+
+               if (!timeout) {
+                       dev_err(fw_mgmt->parent, "timeout can't be zero\n");
+                       return -EINVAL;
+               }
+
+               fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);
+
+               return 0;
+       case FW_MGMT_IOC_MODE_SWITCH:
+               if (!fw_mgmt->intf_fw_loaded) {
+                       dev_err(fw_mgmt->parent,
+                               "Firmware not loaded for mode-switch\n");
+                       return -EPERM;
+               }
+
+               /*
+                * Disallow new ioctls as the fw-core bundle driver is going to
+                * get disconnected soon and the character device will get
+                * removed.
+                */
+               fw_mgmt->mode_switch_started = true;
+
+               ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
+               if (ret) {
+                       dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
+                               ret);
+                       fw_mgmt->mode_switch_started = false;
+                       return ret;
+               }
+
+               return 0;
+       default:
+               return -ENOTTY;
+       }
+}
+
+static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
+                                  unsigned long arg)
+{
+       struct fw_mgmt *fw_mgmt = file->private_data;
+       struct gb_bundle *bundle = fw_mgmt->connection->bundle;
+       int ret = -ENODEV;
+
+       /*
+        * Serialize ioctls.
+        *
+        * We don't want the user to run multiple operations in parallel, for
+        * example updating the Interface firmware twice at once for the same
+        * Interface. There is no need for parallelism here for speed, and
+        * serializing keeps the code simple for now.
+        *
+        * The mutex also protects ->disabled, which tells us whether the
+        * connection is being torn down, so that we don't start any new
+        * operations.
+        */
+       mutex_lock(&fw_mgmt->mutex);
+       if (!fw_mgmt->disabled) {
+               ret = gb_pm_runtime_get_sync(bundle);
+               if (!ret) {
+                       ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
+                       gb_pm_runtime_put_autosuspend(bundle);
+               }
+       }
+       mutex_unlock(&fw_mgmt->mutex);
+
+       return ret;
+}
+
+static const struct file_operations fw_mgmt_fops = {
+       .owner          = THIS_MODULE,
+       .open           = fw_mgmt_open,
+       .release        = fw_mgmt_release,
+       .unlocked_ioctl = fw_mgmt_ioctl_unlocked,
+};
+
+int gb_fw_mgmt_request_handler(struct gb_operation *op)
+{
+       u8 type = op->type;
+
+       switch (type) {
+       case GB_FW_MGMT_TYPE_LOADED_FW:
+               return fw_mgmt_interface_fw_loaded_operation(op);
+       case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
+               return fw_mgmt_backend_fw_updated_operation(op);
+       default:
+               dev_err(&op->connection->bundle->dev,
+                       "unsupported request: %u\n", type);
+               return -EINVAL;
+       }
+}
+
+int gb_fw_mgmt_connection_init(struct gb_connection *connection)
+{
+       struct fw_mgmt *fw_mgmt;
+       int ret, minor;
+
+       if (!connection)
+               return 0;
+
+       fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
+       if (!fw_mgmt)
+               return -ENOMEM;
+
+       fw_mgmt->parent = &connection->bundle->dev;
+       fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
+       fw_mgmt->connection = connection;
+
+       gb_connection_set_data(connection, fw_mgmt);
+       init_completion(&fw_mgmt->completion);
+       ida_init(&fw_mgmt->id_map);
+       mutex_init(&fw_mgmt->mutex);
+       kref_init(&fw_mgmt->kref);
+
+       mutex_lock(&list_mutex);
+       list_add(&fw_mgmt->node, &fw_mgmt_list);
+       mutex_unlock(&list_mutex);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto err_list_del;
+
+       minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+       if (minor < 0) {
+               ret = minor;
+               goto err_connection_disable;
+       }
+
+       /* Add a char device to allow userspace to interact with fw-mgmt */
+       fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
+       cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);
+
+       ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
+       if (ret)
+               goto err_remove_ida;
+
+       /* Add a soft link to the previously added char-dev within the bundle */
+       fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
+                                             fw_mgmt->dev_num, NULL,
+                                             "gb-fw-mgmt-%d", minor);
+       if (IS_ERR(fw_mgmt->class_device)) {
+               ret = PTR_ERR(fw_mgmt->class_device);
+               goto err_del_cdev;
+       }
+
+       return 0;
+
+err_del_cdev:
+       cdev_del(&fw_mgmt->cdev);
+err_remove_ida:
+       ida_simple_remove(&fw_mgmt_minors_map, minor);
+err_connection_disable:
+       gb_connection_disable(connection);
+err_list_del:
+       mutex_lock(&list_mutex);
+       list_del(&fw_mgmt->node);
+       mutex_unlock(&list_mutex);
+
+       put_fw_mgmt(fw_mgmt);
+
+       return ret;
+}
+
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
+{
+       struct fw_mgmt *fw_mgmt;
+
+       if (!connection)
+               return;
+
+       fw_mgmt = gb_connection_get_data(connection);
+
+       device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
+       cdev_del(&fw_mgmt->cdev);
+       ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
+
+       /*
+        * Disallow any new ioctl operations on the char device and wait for
+        * existing ones to finish.
+        */
+       mutex_lock(&fw_mgmt->mutex);
+       fw_mgmt->disabled = true;
+       mutex_unlock(&fw_mgmt->mutex);
+
+       /* All pending greybus operations should have finished by now */
+       gb_connection_disable(fw_mgmt->connection);
+
+       /* Prevent new users from getting access to the fw_mgmt structure */
+       mutex_lock(&list_mutex);
+       list_del(&fw_mgmt->node);
+       mutex_unlock(&list_mutex);
+
+       /*
+        * All current users of fw_mgmt have already taken a reference to it,
+        * so we can drop our own reference now; fw_mgmt will be freed once
+        * the last user drops theirs.
+        */
+       put_fw_mgmt(fw_mgmt);
+}
+
+int fw_mgmt_init(void)
+{
+       int ret;
+
+       fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
+       if (IS_ERR(fw_mgmt_class))
+               return PTR_ERR(fw_mgmt_class);
+
+       ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
+                                 "gb_fw_mgmt");
+       if (ret)
+               goto err_remove_class;
+
+       return 0;
+
+err_remove_class:
+       class_destroy(fw_mgmt_class);
+       return ret;
+}
+
+void fw_mgmt_exit(void)
+{
+       unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
+       class_destroy(fw_mgmt_class);
+       ida_destroy(&fw_mgmt_minors_map);
+}
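For orientation, here is a minimal userspace sketch of talking to the gb-fw-mgmt character device created above. It is illustrative only: the device path and the FW_MGMT_IOC_MODE_SWITCH ioctl are assumptions taken from greybus_firmware.h and typical udev naming, not from this hunk.

/* Illustrative userspace sketch, not part of this merge. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "greybus_firmware.h"	/* assumed to provide FW_MGMT_IOC_* */

int main(void)
{
	/* Assumed device node for minor 0 ("gb-fw-mgmt-%d" above). */
	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ioctls on this fd are serialized by fw_mgmt->mutex in the kernel. */
	if (ioctl(fd, FW_MGMT_IOC_MODE_SWITCH) < 0)
		perror("FW_MGMT_IOC_MODE_SWITCH");

	close(fd);
	return 0;
}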
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
new file mode 100644 (file)
index 0000000..d45dabc
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+#ifndef __GB_CAMERA_H
+#define __GB_CAMERA_H
+
+#include <linux/v4l2-mediabus.h>
+
+/* Input flags need to be set from the caller */
+#define GB_CAMERA_IN_FLAG_TEST         (1 << 0)
+/* Output flags returned */
+#define GB_CAMERA_OUT_FLAG_ADJUSTED    (1 << 0)
+
+/**
+ * struct gb_camera_stream - Represents greybus camera stream.
+ * @width: Stream width in pixels.
+ * @height: Stream height in pixels.
+ * @pixel_code: Media bus pixel code.
+ * @vc: MIPI CSI virtual channel.
+ * @dt: MIPI CSI data types. Most formats use a single data type, in which case
+ *      the second element will be ignored.
+ * @max_size: Maximum size of a frame in bytes. The camera module guarantees
+ *            that all data between the Frame Start and Frame End packet for
+ *            the associated virtual channel and data type(s) will not exceed
+ *            this size.
+ */
+struct gb_camera_stream {
+       unsigned int width;
+       unsigned int height;
+       enum v4l2_mbus_pixelcode pixel_code;
+       unsigned int vc;
+       unsigned int dt[2];
+       unsigned int max_size;
+};
+
+/**
+ * struct gb_camera_csi_params - CSI configuration parameters
+ * @num_lanes: number of CSI data lanes
+ * @clk_freq: CSI clock frequency in Hz
+ */
+struct gb_camera_csi_params {
+       unsigned int num_lanes;
+       unsigned int clk_freq;
+};
+
+/**
+ * struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
+ *                        driver to expose operations to the host camera driver.
+ * @capabilities: Retrieve the camera capabilities and store them in the
+ *                'buf' buffer. The maximum buffer size is specified by the
+ *                caller in the 'len' parameter, and the effective size of the
+ *                capabilities is returned from the function. If the buffer is
+ *                too small to hold the capabilities an error is returned and
+ *                the buffer is left untouched.
+ *
+ * @configure_streams: Negotiate configuration and prepare the module for video
+ *                     capture. The caller specifies the number of streams it
+ *                     requests in the 'nstreams' argument and the associated
+ *                     streams configurations in the 'streams' argument. The
+ *                     GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
+ *                     configuration without applying it, otherwise the
+ *                     configuration is applied by the module. The module can
+ *                     decide to modify the requested configuration, including
+ *                     using a different number of streams. In that case the
+ *                     modified configuration won't be applied, the
+ *                     GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
+ *                     return, and the modified configuration and number of
+ *                     streams stored in 'streams' and 'nstreams'. The module
+ *                     returns its CSI-2 bus parameters in the 'csi_params'
+ *                     structure in all cases.
+ *
+ * @capture: Submit a capture request. The supplied 'request_id' must be unique
+ *           and higher than the IDs of all the previously submitted requests.
+ *           The 'streams' argument specifies which streams are affected by the
+ *           request in the form of a bitmask, with bits corresponding to the
+ *           configured streams indexes. If the request contains settings, the
+ *           'settings' argument points to the settings buffer and its size is
+ *           specified by the 'settings_size' argument. Otherwise the 'settings'
+ *           argument should be set to NULL and 'settings_size' to 0.
+ *
+ * @flush: Flush the capture requests queue. Return the ID of the last request
+ *         that will be processed by the device before it stops transmitting video
+ *         frames. All queued capture requests with IDs higher than the returned
+ *         ID will be dropped without being processed.
+ */
+struct gb_camera_ops {
+       ssize_t (*capabilities)(void *priv, char *buf, size_t len);
+       int (*configure_streams)(void *priv, unsigned int *nstreams,
+                       unsigned int *flags, struct gb_camera_stream *streams,
+                       struct gb_camera_csi_params *csi_params);
+       int (*capture)(void *priv, u32 request_id,
+                       unsigned int streams, unsigned int num_frames,
+                       size_t settings_size, const void *settings);
+       int (*flush)(void *priv, u32 *request_id);
+};
+
+/**
+ * struct gb_camera_module - Represents greybus camera module.
+ * @priv: Module private data, passed to all camera operations.
+ * @ops: Greybus camera operation callbacks.
+ * @interface_id: Interface id of the module.
+ * @refcount: Reference counting object.
+ * @release: Module release function.
+ * @list: List entry in the camera modules list.
+ */
+struct gb_camera_module {
+       void *priv;
+       const struct gb_camera_ops *ops;
+
+       unsigned int interface_id;
+       struct kref refcount;
+       void (*release)(struct kref *kref);
+       struct list_head list; /* Global list */
+};
+
+#define gb_camera_call(f, op, args...)      \
+       (!(f) ? -ENODEV : (((f)->ops->op) ?  \
+       (f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))
+
+int gb_camera_register(struct gb_camera_module *module);
+int gb_camera_unregister(struct gb_camera_module *module);
+
+#endif /* __GB_CAMERA_H */
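The gb_camera_call() helper above is how a consumer invokes these operations on a registered module. A short sketch, assuming the caller already holds a valid struct gb_camera_module pointer; only the types and macros used come from this header.

/* Illustrative sketch, not part of this merge. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include "gb-camera.h"

/*
 * Query capabilities; gb_camera_call() expands to -ENODEV if 'mod' is NULL
 * and to -ENOIOCTLCMD if the module did not implement .capabilities.
 */
static ssize_t query_capabilities(struct gb_camera_module *mod,
				  char *buf, size_t len)
{
	return gb_camera_call(mod, capabilities, buf, len);
}

/* Test a stream configuration without applying it. */
static int test_stream_config(struct gb_camera_module *mod,
			      unsigned int *nstreams,
			      struct gb_camera_stream *streams,
			      struct gb_camera_csi_params *csi)
{
	unsigned int flags = GB_CAMERA_IN_FLAG_TEST;
	int ret;

	ret = gb_camera_call(mod, configure_streams, nstreams, &flags,
			     streams, csi);
	if (ret)
		return ret;

	/*
	 * If the module adjusted the request, 'streams' and 'nstreams' now
	 * describe the configuration it can actually support.
	 */
	if (flags & GB_CAMERA_OUT_FLAG_ADJUSTED)
		pr_info("camera adjusted the requested configuration\n");

	return 0;
}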
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
new file mode 100644 (file)
index 0000000..478c162
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+#define GB_GBPHY_AUTOSUSPEND_MS        3000
+
+struct gbphy_host {
+       struct gb_bundle *bundle;
+       struct list_head devices;
+};
+
+static DEFINE_IDA(gbphy_id);
+
+static ssize_t protocol_id_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+       return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static struct attribute *gbphy_dev_attrs[] = {
+       &dev_attr_protocol_id.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(gbphy_dev);
+
+static void gbphy_dev_release(struct device *dev)
+{
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+       ida_simple_remove(&gbphy_id, gbphy_dev->id);
+       kfree(gbphy_dev);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int gb_gbphy_idle(struct device *dev)
+{
+       pm_runtime_mark_last_busy(dev);
+       pm_request_autosuspend(dev);
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_gbphy_pm_ops = {
+       SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+                          pm_generic_runtime_resume,
+                          gb_gbphy_idle)
+};
+
+static struct device_type greybus_gbphy_dev_type = {
+       .name    =      "gbphy_device",
+       .release =      gbphy_dev_release,
+       .pm     =       &gb_gbphy_pm_ops,
+};
+
+static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+       struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
+       struct gb_bundle *bundle = gbphy_dev->bundle;
+       struct gb_interface *intf = bundle->intf;
+       struct gb_module *module = intf->module;
+       struct gb_host_device *hd = intf->hd;
+
+       if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "MODULE=%u", module->module_id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+                          intf->vendor_id, intf->product_id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "BUNDLE=%u", gbphy_dev->bundle->id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+               return -ENOMEM;
+       if (add_uevent_var(env, "GBPHY=%u", gbphy_dev->id))
+               return -ENOMEM;
+       if (add_uevent_var(env, "PROTOCOL_ID=%02x", cport_desc->protocol_id))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static const struct gbphy_device_id *
+gbphy_dev_match_id(struct gbphy_device *gbphy_dev, struct gbphy_driver *gbphy_drv)
+{
+       const struct gbphy_device_id *id = gbphy_drv->id_table;
+
+       if (!id)
+               return NULL;
+
+       for (; id->protocol_id; id++)
+               if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
+                       return id;
+
+       return NULL;
+}
+
+static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
+{
+       struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+       const struct gbphy_device_id *id;
+
+       id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+       if (id)
+               return 1;
+
+       return 0;
+}
+
+static int gbphy_dev_probe(struct device *dev)
+{
+       struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+       const struct gbphy_device_id *id;
+       int ret;
+
+       id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+       if (!id)
+               return -ENODEV;
+
+       /* for old kernels we need get_sync to resume parent devices */
+       ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
+       if (ret < 0)
+               return ret;
+
+       pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       /*
+        * Drivers should call put on the gbphy dev before returning
+        * from probe if they support runtime pm.
+        */
+       ret = gbphy_drv->probe(gbphy_dev, id);
+       if (ret) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+               pm_runtime_put_noidle(dev);
+               pm_runtime_dont_use_autosuspend(dev);
+       }
+
+       gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);
+
+       return ret;
+}
+
+static int gbphy_dev_remove(struct device *dev)
+{
+       struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+       struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+       gbphy_drv->remove(gbphy_dev);
+
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_put_noidle(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+
+       return 0;
+}
+
+static struct bus_type gbphy_bus_type = {
+       .name =         "gbphy",
+       .match =        gbphy_dev_match,
+       .probe =        gbphy_dev_probe,
+       .remove =       gbphy_dev_remove,
+       .uevent =       gbphy_dev_uevent,
+};
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+                            struct module *owner, const char *mod_name)
+{
+       int retval;
+
+       if (greybus_disabled())
+               return -ENODEV;
+
+       driver->driver.bus = &gbphy_bus_type;
+       driver->driver.name = driver->name;
+       driver->driver.owner = owner;
+       driver->driver.mod_name = mod_name;
+
+       retval = driver_register(&driver->driver);
+       if (retval)
+               return retval;
+
+       pr_info("registered new driver %s\n", driver->name);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
+
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
+
+static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
+                               struct greybus_descriptor_cport *cport_desc)
+{
+       struct gbphy_device *gbphy_dev;
+       int retval;
+       int id;
+
+       id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
+       if (id < 0)
+               return ERR_PTR(id);
+
+       gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
+       if (!gbphy_dev) {
+               ida_simple_remove(&gbphy_id, id);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gbphy_dev->id = id;
+       gbphy_dev->bundle = bundle;
+       gbphy_dev->cport_desc = cport_desc;
+       gbphy_dev->dev.parent = &bundle->dev;
+       gbphy_dev->dev.bus = &gbphy_bus_type;
+       gbphy_dev->dev.type = &greybus_gbphy_dev_type;
+       gbphy_dev->dev.groups = gbphy_dev_groups;
+       gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
+       dev_set_name(&gbphy_dev->dev, "gbphy%d", id);
+
+       retval = device_register(&gbphy_dev->dev);
+       if (retval) {
+               put_device(&gbphy_dev->dev);
+               return ERR_PTR(retval);
+       }
+
+       return gbphy_dev;
+}
+
+static void gb_gbphy_disconnect(struct gb_bundle *bundle)
+{
+       struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
+       struct gbphy_device *gbphy_dev, *temp;
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               gb_pm_runtime_get_noresume(bundle);
+
+       list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
+               list_del(&gbphy_dev->list);
+               device_unregister(&gbphy_dev->dev);
+       }
+
+       kfree(gbphy_host);
+}
+
+static int gb_gbphy_probe(struct gb_bundle *bundle,
+                         const struct greybus_bundle_id *id)
+{
+       struct gbphy_host *gbphy_host;
+       struct gbphy_device *gbphy_dev;
+       int i;
+
+       if (bundle->num_cports == 0)
+               return -ENODEV;
+
+       gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
+       if (!gbphy_host)
+               return -ENOMEM;
+
+       gbphy_host->bundle = bundle;
+       INIT_LIST_HEAD(&gbphy_host->devices);
+       greybus_set_drvdata(bundle, gbphy_host);
+
+       /*
+        * Create a child device for each cport and bind the bridged-phy
+        * drivers to them.
+        */
+       for (i = 0; i < bundle->num_cports; ++i) {
+               gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
+               if (IS_ERR(gbphy_dev)) {
+                       gb_gbphy_disconnect(bundle);
+                       return PTR_ERR(gbphy_dev);
+               }
+               list_add(&gbphy_dev->list, &gbphy_host->devices);
+       }
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+}
+
+static const struct greybus_bundle_id gb_gbphy_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
+       { },
+};
+MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);
+
+static struct greybus_driver gb_gbphy_driver = {
+       .name           = "gbphy",
+       .probe          = gb_gbphy_probe,
+       .disconnect     = gb_gbphy_disconnect,
+       .id_table       = gb_gbphy_id_table,
+};
+
+static int __init gbphy_init(void)
+{
+       int retval;
+
+       retval = bus_register(&gbphy_bus_type);
+       if (retval) {
+               pr_err("gbphy bus register failed (%d)\n", retval);
+               return retval;
+       }
+
+       retval = greybus_register(&gb_gbphy_driver);
+       if (retval) {
+               pr_err("error registering greybus driver\n");
+               goto error_gbphy;
+       }
+
+       return 0;
+
+error_gbphy:
+       bus_unregister(&gbphy_bus_type);
+       ida_destroy(&gbphy_id);
+       return retval;
+}
+module_init(gbphy_init);
+
+static void __exit gbphy_exit(void)
+{
+       greybus_deregister(&gb_gbphy_driver);
+       bus_unregister(&gbphy_bus_type);
+       ida_destroy(&gbphy_id);
+}
+module_exit(gbphy_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
new file mode 100644 (file)
index 0000000..e251186
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __GBPHY_H
+#define __GBPHY_H
+
+struct gbphy_device {
+       u32 id;
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_bundle *bundle;
+       struct list_head list;
+       struct device dev;
+};
+#define to_gbphy_dev(d) container_of(d, struct gbphy_device, dev)
+
+static inline void *gb_gbphy_get_data(struct gbphy_device *gdev)
+{
+       return dev_get_drvdata(&gdev->dev);
+}
+
+static inline void gb_gbphy_set_data(struct gbphy_device *gdev, void *data)
+{
+       dev_set_drvdata(&gdev->dev, data);
+}
+
+struct gbphy_device_id {
+       __u8 protocol_id;
+};
+
+#define GBPHY_PROTOCOL(p)              \
+       .protocol_id    = (p),
+
+struct gbphy_driver {
+       const char *name;
+       int (*probe)(struct gbphy_device *,
+                    const struct gbphy_device_id *id);
+       void (*remove)(struct gbphy_device *);
+       const struct gbphy_device_id *id_table;
+
+       struct device_driver driver;
+};
+#define to_gbphy_driver(d) container_of(d, struct gbphy_driver, driver)
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+                            struct module *owner, const char *mod_name);
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver);
+
+#define gb_gbphy_register(driver) \
+       gb_gbphy_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define gb_gbphy_deregister(driver) \
+       gb_gbphy_deregister_driver(driver)
+
+/**
+ * module_gbphy_driver() - Helper macro for registering a gbphy driver
+ * @__gbphy_driver: gbphy_driver structure
+ *
+ * Helper macro for gbphy drivers to set up proper module init / exit
+ * functions.  Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_gbphy_driver(__gbphy_driver)    \
+       module_driver(__gbphy_driver, gb_gbphy_register, gb_gbphy_deregister)
+
+#ifdef CONFIG_PM_RUNTIME
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev)
+{
+       struct device *dev = &gbphy_dev->dev;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
+               pm_runtime_put_noidle(dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev)
+{
+       struct device *dev = &gbphy_dev->dev;
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev)
+{
+       pm_runtime_get_noresume(&gbphy_dev->dev);
+}
+
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev)
+{
+       pm_runtime_put_noidle(&gbphy_dev->dev);
+}
+#else
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev) { return 0; }
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev) {}
+#endif
+
+#endif /* __GBPHY_H */
+
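To make the registration boilerplate concrete, here is a minimal bridged-phy driver skeleton built only from the declarations above; the probe/remove bodies are placeholders and the protocol in the id table is just an example (the gpio.c driver that follows is a complete real driver).

/* Illustrative skeleton, not part of this merge. */
#include <linux/module.h>
#include "greybus.h"
#include "gbphy.h"

static int example_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	/* Create and enable a gb_connection for gbphy_dev->cport_desc here. */

	/* Drop the reference taken by the bus core if runtime PM is supported. */
	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;
}

static void example_remove(struct gbphy_device *gbphy_dev)
{
	if (gbphy_runtime_get_sync(gbphy_dev))
		gbphy_runtime_get_noresume(gbphy_dev);

	/* Disable and destroy the connection here. */
}

static const struct gbphy_device_id example_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },	/* placeholder protocol */
	{ },
};
MODULE_DEVICE_TABLE(gbphy, example_id_table);

static struct gbphy_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_id_table,
};
module_gbphy_driver(example_driver);
MODULE_LICENSE("GPL v2");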
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
new file mode 100644 (file)
index 0000000..294e2f5
--- /dev/null
@@ -0,0 +1,775 @@
+/*
+ * GPIO Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_gpio_line {
+       /* The following has to be an array of line_max entries */
+       /* --> make them just a flags field */
+       u8                      active:    1,
+                               direction: 1,   /* 0 = output, 1 = input */
+                               value:     1;   /* 0 = low, 1 = high */
+       u16                     debounce_usec;
+
+       u8                      irq_type;
+       bool                    irq_type_pending;
+       bool                    masked;
+       bool                    masked_pending;
+};
+
+struct gb_gpio_controller {
+       struct gbphy_device     *gbphy_dev;
+       struct gb_connection    *connection;
+       u8                      line_max;       /* max line number */
+       struct gb_gpio_line     *lines;
+
+       struct gpio_chip        chip;
+       struct irq_chip         irqc;
+       struct irq_chip         *irqchip;
+       struct irq_domain       *irqdomain;
+       unsigned int            irq_base;
+       irq_flow_handler_t      irq_handler;
+       unsigned int            irq_default_type;
+       struct mutex            irq_lock;
+};
+#define gpio_chip_to_gb_gpio_controller(chip) \
+       container_of(chip, struct gb_gpio_controller, chip)
+#define irq_data_to_gpio_chip(d) (d->domain->host_data)
+
+static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
+{
+       struct gb_gpio_line_count_response response;
+       int ret;
+
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
+                               NULL, 0, &response, sizeof(response));
+       if (!ret)
+               ggc->line_max = response.count;
+       return ret;
+}
+
+static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
+{
+       struct gb_gpio_activate_request request;
+       struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       request.which = which;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
+                                &request, sizeof(request), NULL, 0);
+       if (ret) {
+               gbphy_runtime_put_autosuspend(gbphy_dev);
+               return ret;
+       }
+
+       ggc->lines[which].active = true;
+
+       return 0;
+}
+
+static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
+                                       u8 which)
+{
+       struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+       struct device *dev = &gbphy_dev->dev;
+       struct gb_gpio_deactivate_request request;
+       int ret;
+
+       request.which = which;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
+                                &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(dev, "failed to deactivate gpio %u\n", which);
+               goto out_pm_put;
+       }
+
+       ggc->lines[which].active = false;
+
+out_pm_put:
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+}
+
+static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
+                                       u8 which)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_get_direction_request request;
+       struct gb_gpio_get_direction_response response;
+       int ret;
+       u8 direction;
+
+       request.which = which;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret)
+               return ret;
+
+       direction = response.direction;
+       if (direction && direction != 1) {
+               dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
+                        which, direction);
+       }
+       ggc->lines[which].direction = direction ? 1 : 0;
+       return 0;
+}
+
+static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
+                                       u8 which)
+{
+       struct gb_gpio_direction_in_request request;
+       int ret;
+
+       request.which = which;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
+                               &request, sizeof(request), NULL, 0);
+       if (!ret)
+               ggc->lines[which].direction = 1;
+       return ret;
+}
+
+static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
+                                       u8 which, bool value_high)
+{
+       struct gb_gpio_direction_out_request request;
+       int ret;
+
+       request.which = which;
+       request.value = value_high ? 1 : 0;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
+                               &request, sizeof(request), NULL, 0);
+       if (!ret)
+               ggc->lines[which].direction = 0;
+       return ret;
+}
+
+static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
+                                       u8 which)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_get_value_request request;
+       struct gb_gpio_get_value_response response;
+       int ret;
+       u8 value;
+
+       request.which = which;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(dev, "failed to get value of gpio %u\n", which);
+               return ret;
+       }
+
+       value = response.value;
+       if (value && value != 1) {
+               dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
+                        which, value);
+       }
+       ggc->lines[which].value = value ? 1 : 0;
+       return 0;
+}
+
+static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
+                                       u8 which, bool value_high)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_set_value_request request;
+       int ret;
+
+       if (ggc->lines[which].direction == 1) {
+               dev_warn(dev, "refusing to set value of input gpio %u\n",
+                        which);
+               return;
+       }
+
+       request.which = which;
+       request.value = value_high ? 1 : 0;
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
+                               &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(dev, "failed to set value of gpio %u\n", which);
+               return;
+       }
+
+       ggc->lines[which].value = request.value;
+}
+
+static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
+                                       u8 which, u16 debounce_usec)
+{
+       struct gb_gpio_set_debounce_request request;
+       int ret;
+
+       request.which = which;
+       request.usec = cpu_to_le16(debounce_usec);
+       ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
+                               &request, sizeof(request), NULL, 0);
+       if (!ret)
+               ggc->lines[which].debounce_usec = debounce_usec;
+       return ret;
+}
+
+static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_irq_mask_request request;
+       int ret;
+
+       request.which = hwirq;
+       ret = gb_operation_sync(ggc->connection,
+                               GB_GPIO_TYPE_IRQ_MASK,
+                               &request, sizeof(request), NULL, 0);
+       if (ret)
+               dev_err(dev, "failed to mask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_irq_unmask_request request;
+       int ret;
+
+       request.which = hwirq;
+       ret = gb_operation_sync(ggc->connection,
+                               GB_GPIO_TYPE_IRQ_UNMASK,
+                               &request, sizeof(request), NULL, 0);
+       if (ret)
+               dev_err(dev, "failed to unmask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
+                                       u8 hwirq, u8 type)
+{
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_gpio_irq_type_request request;
+       int ret;
+
+       request.which = hwirq;
+       request.type = type;
+
+       ret = gb_operation_sync(ggc->connection,
+                               GB_GPIO_TYPE_IRQ_TYPE,
+                               &request, sizeof(request), NULL, 0);
+       if (ret)
+               dev_err(dev, "failed to set irq type: %d\n", ret);
+}
+
+static void gb_gpio_irq_mask(struct irq_data *d)
+{
+       struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+       line->masked = true;
+       line->masked_pending = true;
+}
+
+static void gb_gpio_irq_unmask(struct irq_data *d)
+{
+       struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+       line->masked = false;
+       line->masked_pending = true;
+}
+
+static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+       struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+       struct device *dev = &ggc->gbphy_dev->dev;
+       u8 irq_type;
+
+       switch (type) {
+       case IRQ_TYPE_NONE:
+               irq_type = GB_GPIO_IRQ_TYPE_NONE;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
+               break;
+       default:
+               dev_err(dev, "unsupported irq type: %u\n", type);
+               return -EINVAL;
+       }
+
+       line->irq_type = irq_type;
+       line->irq_type_pending = true;
+
+       return 0;
+}
+
+static void gb_gpio_irq_bus_lock(struct irq_data *d)
+{
+       struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       mutex_lock(&ggc->irq_lock);
+}
+
+static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+       struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+       if (line->irq_type_pending) {
+               _gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
+               line->irq_type_pending = false;
+       }
+
+       if (line->masked_pending) {
+               if (line->masked)
+                       _gb_gpio_irq_mask(ggc, d->hwirq);
+               else
+                       _gb_gpio_irq_unmask(ggc, d->hwirq);
+               line->masked_pending = false;
+       }
+
+       mutex_unlock(&ggc->irq_lock);
+}
+
+static int gb_gpio_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
+       struct device *dev = &ggc->gbphy_dev->dev;
+       struct gb_message *request;
+       struct gb_gpio_irq_event_request *event;
+       u8 type = op->type;
+       int irq;
+       struct irq_desc *desc;
+
+       if (type != GB_GPIO_TYPE_IRQ_EVENT) {
+               dev_err(dev, "unsupported unsolicited request: %u\n", type);
+               return -EINVAL;
+       }
+
+       request = op->request;
+
+       if (request->payload_size < sizeof(*event)) {
+               dev_err(dev, "short event received (%zu < %zu)\n",
+                       request->payload_size, sizeof(*event));
+               return -EINVAL;
+       }
+
+       event = request->payload;
+       if (event->which > ggc->line_max) {
+               dev_err(dev, "invalid hw irq: %d\n", event->which);
+               return -EINVAL;
+       }
+
+       irq = irq_find_mapping(ggc->irqdomain, event->which);
+       if (!irq) {
+               dev_err(dev, "failed to find IRQ\n");
+               return -EINVAL;
+       }
+       desc = irq_to_desc(irq);
+       if (!desc) {
+               dev_err(dev, "failed to look up irq\n");
+               return -EINVAL;
+       }
+
+       local_irq_disable();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+       generic_handle_irq_desc(irq, desc);
+#else
+       generic_handle_irq_desc(desc);
+#endif
+       local_irq_enable();
+
+       return 0;
+}
+
+static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       return gb_gpio_activate_operation(ggc, (u8)offset);
+}
+
+static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       gb_gpio_deactivate_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       u8 which;
+       int ret;
+
+       which = (u8)offset;
+       ret = gb_gpio_get_direction_operation(ggc, which);
+       if (ret)
+               return ret;
+
+       return ggc->lines[which].direction ? 1 : 0;
+}
+
+static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       return gb_gpio_direction_in_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                       int value)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       u8 which;
+       int ret;
+
+       which = (u8)offset;
+       ret = gb_gpio_get_value_operation(ggc, which);
+       if (ret)
+               return ret;
+
+       return ggc->lines[which].value;
+}
+
+static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+                                       unsigned debounce)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+       u16 usec;
+
+       if (debounce > U16_MAX)
+               return -EINVAL;
+       usec = (u16)debounce;
+
+       return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
+}
+
+static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
+{
+       int ret;
+
+       /* Now find out how many lines there are */
+       ret = gb_gpio_line_count_operation(ggc);
+       if (ret)
+               return ret;
+
+       ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
+                            GFP_KERNEL);
+       if (!ggc->lines)
+               return -ENOMEM;
+
+       return ret;
+}
+
+/**
+ * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
+ * @domain: the irqdomain used by this irqchip
+ * @irq: the global irq number used by this GB gpio irqchip irq
+ * @hwirq: the local IRQ/GPIO line offset on this GB gpio
+ *
+ * This function will set up the mapping for a certain IRQ line on a
+ * GB gpio by assigning the GB gpio as chip data, and using the irqchip
+ * stored inside the GB gpio.
+ */
+static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
+                          irq_hw_number_t hwirq)
+{
+       struct gpio_chip *chip = domain->host_data;
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       irq_set_chip_data(irq, ggc);
+       irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
+       irq_set_noprobe(irq);
+       /*
+        * No set-up of the hardware will happen if IRQ_TYPE_NONE
+        * is passed as default type.
+        */
+       if (ggc->irq_default_type != IRQ_TYPE_NONE)
+               irq_set_irq_type(irq, ggc->irq_default_type);
+
+       return 0;
+}
+
+static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+       irq_set_chip_and_handler(irq, NULL, NULL);
+       irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops gb_gpio_domain_ops = {
+       .map    = gb_gpio_irq_map,
+       .unmap  = gb_gpio_irq_unmap,
+};
+
+/**
+ * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
+ * @ggc: the gb_gpio_controller to remove the irqchip from
+ *
+ * This is called only from gb_gpio_remove()
+ */
+static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
+{
+       unsigned int offset;
+
+       /* Remove all IRQ mappings and delete the domain */
+       if (ggc->irqdomain) {
+               for (offset = 0; offset < (ggc->line_max + 1); offset++)
+                       irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
+               irq_domain_remove(ggc->irqdomain);
+       }
+
+       ggc->irqchip = NULL;
+}
+
+/**
+ * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
+ * @chip: the gpio chip to add the irqchip to
+ * @irqchip: the irqchip to add to the adapter
+ * @first_irq: if not dynamically assigned, the base (first) IRQ to
+ * allocate gpio irqs from
+ * @handler: the irq handler to use (often a predefined irq core function)
+ * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
+ * to have the core avoid setting up any default type in the hardware.
+ *
+ * This function closely associates a certain irqchip with a certain
+ * gpio chip, providing an irq domain to translate the local IRQs to
+ * global irqs, and making sure that the gpio chip
+ * is passed as chip data to all related functions. Driver callbacks
+ * need to use container_of() to get their local state containers back
+ * from the gpio chip passed as chip data. An irqdomain will be stored
+ * in the gpio chip that shall be used by the driver to handle IRQ number
+ * translation. The gpio chip will need to be initialized and registered
+ * before calling this function.
+ */
+static int gb_gpio_irqchip_add(struct gpio_chip *chip,
+                        struct irq_chip *irqchip,
+                        unsigned int first_irq,
+                        irq_flow_handler_t handler,
+                        unsigned int type)
+{
+       struct gb_gpio_controller *ggc;
+       unsigned int offset;
+       unsigned irq_base;
+
+       if (!chip || !irqchip)
+               return -EINVAL;
+
+       ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       ggc->irqchip = irqchip;
+       ggc->irq_handler = handler;
+       ggc->irq_default_type = type;
+       ggc->irqdomain = irq_domain_add_simple(NULL,
+                                       ggc->line_max + 1, first_irq,
+                                       &gb_gpio_domain_ops, chip);
+       if (!ggc->irqdomain) {
+               ggc->irqchip = NULL;
+               return -EINVAL;
+       }
+
+       /*
+        * Prepare the mapping since the irqchip shall be orthogonal to
+        * any gpio calls. If the first_irq was zero, this is
+        * necessary to allocate descriptors for all IRQs.
+        */
+       for (offset = 0; offset < (ggc->line_max + 1); offset++) {
+               irq_base = irq_create_mapping(ggc->irqdomain, offset);
+               if (offset == 0)
+                       ggc->irq_base = irq_base;
+       }
+
+       return 0;
+}
+
+static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+       return irq_find_mapping(ggc->irqdomain, offset);
+}
+
+static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
+                        const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       struct gb_gpio_controller *ggc;
+       struct gpio_chip *gpio;
+       struct irq_chip *irqc;
+       int ret;
+
+       ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
+       if (!ggc)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         gb_gpio_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto exit_ggc_free;
+       }
+
+       ggc->connection = connection;
+       gb_connection_set_data(connection, ggc);
+       ggc->gbphy_dev = gbphy_dev;
+       gb_gbphy_set_data(gbphy_dev, ggc);
+
+       ret = gb_connection_enable_tx(connection);
+       if (ret)
+               goto exit_connection_destroy;
+
+       ret = gb_gpio_controller_setup(ggc);
+       if (ret)
+               goto exit_connection_disable;
+
+       irqc = &ggc->irqc;
+       irqc->irq_mask = gb_gpio_irq_mask;
+       irqc->irq_unmask = gb_gpio_irq_unmask;
+       irqc->irq_set_type = gb_gpio_irq_set_type;
+       irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
+       irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
+       irqc->name = "greybus_gpio";
+
+       mutex_init(&ggc->irq_lock);
+
+       gpio = &ggc->chip;
+
+       gpio->label = "greybus_gpio";
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+       gpio->parent = &gbphy_dev->dev;
+#else
+       gpio->dev = &gbphy_dev->dev;
+#endif
+       gpio->owner = THIS_MODULE;
+
+       gpio->request = gb_gpio_request;
+       gpio->free = gb_gpio_free;
+       gpio->get_direction = gb_gpio_get_direction;
+       gpio->direction_input = gb_gpio_direction_input;
+       gpio->direction_output = gb_gpio_direction_output;
+       gpio->get = gb_gpio_get;
+       gpio->set = gb_gpio_set;
+       gpio->set_debounce = gb_gpio_set_debounce;
+       gpio->to_irq = gb_gpio_to_irq;
+       gpio->base = -1;                /* Allocate base dynamically */
+       gpio->ngpio = ggc->line_max + 1;
+       gpio->can_sleep = true;
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto exit_line_free;
+
+       ret = gb_gpio_irqchip_add(gpio, irqc, 0,
+                                  handle_level_irq, IRQ_TYPE_NONE);
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                       "failed to add irq chip: %d\n", ret);
+               goto exit_line_free;
+       }
+
+       ret = gpiochip_add(gpio);
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                       "failed to add gpio chip: %d\n", ret);
+               goto exit_gpio_irqchip_remove;
+       }
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+       return 0;
+
+exit_gpio_irqchip_remove:
+       gb_gpio_irqchip_remove(ggc);
+exit_line_free:
+       kfree(ggc->lines);
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_ggc_free:
+       kfree(ggc);
+       return ret;
+}
+
+static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = ggc->connection;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       gb_connection_disable_rx(connection);
+       gb_gpiochip_remove(&ggc->chip);
+       gb_gpio_irqchip_remove(ggc);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+       kfree(ggc->lines);
+       kfree(ggc);
+}
+
+static const struct gbphy_device_id gb_gpio_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
+
+static struct gbphy_driver gpio_driver = {
+       .name           = "gpio",
+       .probe          = gb_gpio_probe,
+       .remove         = gb_gpio_remove,
+       .id_table       = gb_gpio_id_table,
+};
+
+module_gbphy_driver(gpio_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
new file mode 100644 (file)
index 0000000..3e32028
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __LINUX_GREYBUS_H
+#define __LINUX_GREYBUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include "kernel_ver.h"
+#include "greybus_id.h"
+#include "greybus_manifest.h"
+#include "greybus_protocols.h"
+#include "manifest.h"
+#include "hd.h"
+#include "svc.h"
+#include "control.h"
+#include "module.h"
+#include "interface.h"
+#include "bundle.h"
+#include "connection.h"
+#include "operation.h"
+#include "timesync.h"
+
+/* Matches up with the Greybus Protocol specification document */
+#define GREYBUS_VERSION_MAJOR  0x00
+#define GREYBUS_VERSION_MINOR  0x01
+
+#define GREYBUS_ID_MATCH_DEVICE \
+       (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
+
+#define GREYBUS_DEVICE(v, p)                                   \
+       .match_flags    = GREYBUS_ID_MATCH_DEVICE,              \
+       .vendor         = (v),                                  \
+       .product        = (p),
+
+#define GREYBUS_DEVICE_CLASS(c)                                        \
+       .match_flags    = GREYBUS_ID_MATCH_CLASS,               \
+       .class          = (c),
+
+/* Maximum number of CPorts */
+#define CPORT_ID_MAX   4095            /* UniPro max id is 4095 */
+#define CPORT_ID_BAD   U16_MAX
+
+struct greybus_driver {
+       const char *name;
+
+       int (*probe)(struct gb_bundle *bundle,
+                    const struct greybus_bundle_id *id);
+       void (*disconnect)(struct gb_bundle *bundle);
+
+       const struct greybus_bundle_id *id_table;
+
+       struct device_driver driver;
+};
+#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+
+static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
+{
+       dev_set_drvdata(&bundle->dev, data);
+}
+
+static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
+{
+       return dev_get_drvdata(&bundle->dev);
+}
+
+/* Don't call these directly, use the module_greybus_driver() macro instead */
+int greybus_register_driver(struct greybus_driver *driver,
+                           struct module *module, const char *mod_name);
+void greybus_deregister_driver(struct greybus_driver *driver);
+
+/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
+#define greybus_register(driver) \
+       greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define greybus_deregister(driver) \
+       greybus_deregister_driver(driver)
+
+/**
+ * module_greybus_driver() - Helper macro for registering a Greybus driver
+ * @__greybus_driver: greybus_driver structure
+ *
+ * Helper macro for Greybus drivers to set up proper module init / exit
+ * functions.  Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_greybus_driver(__greybus_driver)        \
+       module_driver(__greybus_driver, greybus_register, greybus_deregister)
+
+int greybus_disabled(void);
+
+void gb_debugfs_init(void);
+void gb_debugfs_cleanup(void);
+struct dentry *gb_debugfs_get(void);
+
+extern struct bus_type greybus_bus_type;
+
+extern struct device_type greybus_hd_type;
+extern struct device_type greybus_module_type;
+extern struct device_type greybus_interface_type;
+extern struct device_type greybus_control_type;
+extern struct device_type greybus_bundle_type;
+extern struct device_type greybus_svc_type;
+
+static inline int is_gb_host_device(const struct device *dev)
+{
+       return dev->type == &greybus_hd_type;
+}
+
+static inline int is_gb_module(const struct device *dev)
+{
+       return dev->type == &greybus_module_type;
+}
+
+static inline int is_gb_interface(const struct device *dev)
+{
+       return dev->type == &greybus_interface_type;
+}
+
+static inline int is_gb_control(const struct device *dev)
+{
+       return dev->type == &greybus_control_type;
+}
+
+static inline int is_gb_bundle(const struct device *dev)
+{
+       return dev->type == &greybus_bundle_type;
+}
+
+static inline int is_gb_svc(const struct device *dev)
+{
+       return dev->type == &greybus_svc_type;
+}
+
+static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
+{
+       return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_GREYBUS_H */
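Putting the pieces above together, a bundle driver needs little more than an id table, probe/disconnect callbacks and the module_greybus_driver() helper. A minimal skeleton, with placeholder bodies and the bridged-phy class used purely as an example:

/* Illustrative skeleton, not part of this merge. */
#include <linux/module.h>
#include "greybus.h"

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/* Allocate per-bundle state and stash it on the bundle. */
	greybus_set_drvdata(bundle, NULL);
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	void *priv = greybus_get_drvdata(bundle);

	/* Tear down connections and free 'priv' here. */
	(void)priv;
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },	/* example class */
	{ },
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);
MODULE_LICENSE("GPL v2");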
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
new file mode 100644 (file)
index 0000000..4784ed9
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Greybus Component Authentication User Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_AUTHENTICATION_USER_H
+#define __GREYBUS_AUTHENTICATION_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define CAP_CERTIFICATE_MAX_SIZE       1600
+#define CAP_SIGNATURE_MAX_SIZE         320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC              0x00000001
+#define CAP_CERT_IMS_EASC              0x00000002
+#define CAP_CERT_IMS_EARC              0x00000003
+#define CAP_CERT_IMS_IAPC              0x00000004
+#define CAP_CERT_IMS_IASC              0x00000005
+#define CAP_CERT_IMS_IARC              0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND      0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL        0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT    0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND  0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI               0x00000001
+#define CAP_AUTH_IMS_SEC               0x00000002
+#define CAP_AUTH_IMS_RSA               0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS     0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE    0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP    0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY      0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL    0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+       __u8                    uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+       __u32                   certificate_class;
+       __u32                   certificate_id;
+
+       __u8                    result_code;
+       __u32                   cert_size;
+       __u8                    certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+       __u32                   auth_type;
+       __u8                    uid[8];
+       __u8                    challenge[32];
+
+       __u8                    result_code;
+       __u8                    response[64];
+       __u32                   signature_size;
+       __u8                    signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE                 'C'
+#define CAP_IOC_GET_ENDPOINT_UID       _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE    _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE           _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+#endif /* __GREYBUS_AUTHENTICATION_USER_H */
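[Editorial note: a minimal user-space sketch of driving the authentication ioctls defined above. The device-node name, the local include path, and the way the challenge is chosen are illustrative assumptions only; they are not specified by this header.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "greybus_authentication.h"	/* assumes a local copy of this header */

int main(void)
{
	struct cap_ioc_get_endpoint_uid uid_ioc;
	struct cap_ioc_authenticate auth_ioc;
	int fd, ret;

	fd = open("/dev/gb-authenticate0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	/* Fetch the 8-byte endpoint unique id. */
	ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid_ioc);
	if (ret < 0)
		goto out;

	/* Challenge/response authentication against the primary IMS key. */
	memset(&auth_ioc, 0, sizeof(auth_ioc));
	auth_ioc.auth_type = CAP_AUTH_IMS_PRI;
	memcpy(auth_ioc.uid, uid_ioc.uid, sizeof(auth_ioc.uid));
	/* auth_ioc.challenge[] would be filled with caller-chosen random data */
	ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &auth_ioc);
	if (!ret && auth_ioc.result_code == CAP_AUTH_RESULT_CR_SUCCESS)
		printf("authenticated, %u byte signature returned\n",
		       auth_ioc.signature_size);
out:
	close(fd);
	return ret < 0;
}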
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
new file mode 100644 (file)
index 0000000..277a2ac
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Greybus Firmware Management User Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_FIRMWARE_USER_H
+#define __GREYBUS_FIRMWARE_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define GB_FIRMWARE_U_TAG_MAX_SIZE             10
+
+#define GB_FW_U_LOAD_METHOD_UNIPRO             0x01
+#define GB_FW_U_LOAD_METHOD_INTERNAL           0x02
+
+#define GB_FW_U_LOAD_STATUS_FAILED             0x00
+#define GB_FW_U_LOAD_STATUS_UNVALIDATED                0x01
+#define GB_FW_U_LOAD_STATUS_VALIDATED          0x02
+#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED  0x03
+
+#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS      0x01
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND    0x02
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH   0x03
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE   0x04
+#define GB_FW_U_BACKEND_FW_STATUS_INT          0x05
+#define GB_FW_U_BACKEND_FW_STATUS_RETRY                0x06
+#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED        0x07
+
+#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS         0x01
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE   0x02
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED   0x03
+#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY           0x04
+#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT                0x05
+
+/* IOCTL support */
+struct fw_mgmt_ioc_get_intf_version {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u16 major;
+       __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_get_backend_version {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u16 major;
+       __u16 minor;
+       __u8 status;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u8 load_method;
+       __u8 status;
+       __u16 major;
+       __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_backend_fw_update {
+       __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+       __u8 status;
+} __attribute__ ((__packed__));
+
+#define FW_MGMT_IOCTL_BASE                     'F'
+#define FW_MGMT_IOC_GET_INTF_FW                        _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW             _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE     _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE     _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS             _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH                        _IO(FW_MGMT_IOCTL_BASE, 5)
+
+#endif /* __GREYBUS_FIRMWARE_USER_H */
+
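[Editorial note: a minimal user-space sketch of the firmware-management ioctls above. The device-node name, the "s3fw" firmware tag, and passing a pointer for FW_MGMT_IOC_SET_TIMEOUT_MS are illustrative assumptions.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "greybus_firmware.h"	/* assumes a local copy of this header */

int main(void)
{
	struct fw_mgmt_ioc_get_intf_version ver;
	struct fw_mgmt_ioc_intf_load_and_validate load;
	unsigned int timeout_ms = 10000;
	int fd, ret;

	fd = open("/dev/gb-fw-mgmt0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout_ms);

	/* Report the interface firmware that is currently running. */
	ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &ver);
	if (!ret)
		printf("running \"%.10s\" %u.%u\n", (char *)ver.firmware_tag,
		       ver.major, ver.minor);

	/* Load and validate a new interface firmware over UniPro. */
	memset(&load, 0, sizeof(load));
	strncpy((char *)load.firmware_tag, "s3fw", sizeof(load.firmware_tag));
	load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
	ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load);
	if (!ret && load.status == GB_FW_U_LOAD_STATUS_VALIDATED)
		printf("loaded and validated %u.%u\n", load.major, load.minor);

	close(fd);
	return ret < 0;
}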
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
new file mode 100644 (file)
index 0000000..4bb1fc1
--- /dev/null
@@ -0,0 +1,26 @@
+/* FIXME
+ * move this to include/linux/mod_devicetable.h when merging
+ */
+
+#ifndef __LINUX_GREYBUS_ID_H
+#define __LINUX_GREYBUS_ID_H
+
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+
+struct greybus_bundle_id {
+       __u16   match_flags;
+       __u32   vendor;
+       __u32   product;
+       __u8    class;
+
+       kernel_ulong_t  driver_info __aligned(sizeof(kernel_ulong_t));
+};
+
+/* Used to match the greybus_bundle_id */
+#define GREYBUS_ID_MATCH_VENDOR                BIT(0)
+#define GREYBUS_ID_MATCH_PRODUCT       BIT(1)
+#define GREYBUS_ID_MATCH_CLASS         BIT(2)
+
+#endif /* __LINUX_GREYBUS_ID_H */
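[Editorial note: illustrative only, not from the driver tree: how a bundle driver might declare the ids it binds to using the match flags above, here a class-only match.]

static const struct greybus_bundle_id example_id_table[] = {
	{
		.match_flags	= GREYBUS_ID_MATCH_CLASS,
		.class		= 0x05,		/* e.g. GREYBUS_CLASS_HID */
	},
	{ }	/* terminating entry */
};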
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
new file mode 100644 (file)
index 0000000..d135945
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Greybus manifest definition
+ *
+ * See "Greybus Application Protocol" document (version 0.1) for
+ * details on these values and structures.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 and BSD licenses.
+ */
+
+#ifndef __GREYBUS_MANIFEST_H
+#define __GREYBUS_MANIFEST_H
+
+enum greybus_descriptor_type {
+       GREYBUS_TYPE_INVALID            = 0x00,
+       GREYBUS_TYPE_INTERFACE          = 0x01,
+       GREYBUS_TYPE_STRING             = 0x02,
+       GREYBUS_TYPE_BUNDLE             = 0x03,
+       GREYBUS_TYPE_CPORT              = 0x04,
+};
+
+enum greybus_protocol {
+       GREYBUS_PROTOCOL_CONTROL        = 0x00,
+       /* 0x01 is unused */
+       GREYBUS_PROTOCOL_GPIO           = 0x02,
+       GREYBUS_PROTOCOL_I2C            = 0x03,
+       GREYBUS_PROTOCOL_UART           = 0x04,
+       GREYBUS_PROTOCOL_HID            = 0x05,
+       GREYBUS_PROTOCOL_USB            = 0x06,
+       GREYBUS_PROTOCOL_SDIO           = 0x07,
+       GREYBUS_PROTOCOL_POWER_SUPPLY   = 0x08,
+       GREYBUS_PROTOCOL_PWM            = 0x09,
+       /* 0x0a is unused */
+       GREYBUS_PROTOCOL_SPI            = 0x0b,
+       GREYBUS_PROTOCOL_DISPLAY        = 0x0c,
+       GREYBUS_PROTOCOL_CAMERA_MGMT    = 0x0d,
+       GREYBUS_PROTOCOL_SENSOR         = 0x0e,
+       GREYBUS_PROTOCOL_LIGHTS         = 0x0f,
+       GREYBUS_PROTOCOL_VIBRATOR       = 0x10,
+       GREYBUS_PROTOCOL_LOOPBACK       = 0x11,
+       GREYBUS_PROTOCOL_AUDIO_MGMT     = 0x12,
+       GREYBUS_PROTOCOL_AUDIO_DATA     = 0x13,
+       GREYBUS_PROTOCOL_SVC            = 0x14,
+       GREYBUS_PROTOCOL_BOOTROM        = 0x15,
+       GREYBUS_PROTOCOL_CAMERA_DATA    = 0x16,
+       GREYBUS_PROTOCOL_FW_DOWNLOAD    = 0x17,
+       GREYBUS_PROTOCOL_FW_MANAGEMENT  = 0x18,
+       GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
+       GREYBUS_PROTOCOL_LOG            = 0x1a,
+               /* ... */
+       GREYBUS_PROTOCOL_RAW            = 0xfe,
+       GREYBUS_PROTOCOL_VENDOR         = 0xff,
+};
+
+enum greybus_class_type {
+       GREYBUS_CLASS_CONTROL           = 0x00,
+       /* 0x01 is unused */
+       /* 0x02 is unused */
+       /* 0x03 is unused */
+       /* 0x04 is unused */
+       GREYBUS_CLASS_HID               = 0x05,
+       /* 0x06 is unused */
+       /* 0x07 is unused */
+       GREYBUS_CLASS_POWER_SUPPLY      = 0x08,
+       /* 0x09 is unused */
+       GREYBUS_CLASS_BRIDGED_PHY       = 0x0a,
+       /* 0x0b is unused */
+       GREYBUS_CLASS_DISPLAY           = 0x0c,
+       GREYBUS_CLASS_CAMERA            = 0x0d,
+       GREYBUS_CLASS_SENSOR            = 0x0e,
+       GREYBUS_CLASS_LIGHTS            = 0x0f,
+       GREYBUS_CLASS_VIBRATOR          = 0x10,
+       GREYBUS_CLASS_LOOPBACK          = 0x11,
+       GREYBUS_CLASS_AUDIO             = 0x12,
+       /* 0x13 is unused */
+       /* 0x14 is unused */
+       GREYBUS_CLASS_BOOTROM           = 0x15,
+       GREYBUS_CLASS_FW_MANAGEMENT     = 0x16,
+       GREYBUS_CLASS_LOG               = 0x17,
+               /* ... */
+       GREYBUS_CLASS_RAW               = 0xfe,
+       GREYBUS_CLASS_VENDOR            = 0xff,
+};
+
+enum {
+       GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
+};
+
+/*
+ * The string in a string descriptor is not NUL-terminated.  The
+ * size of the descriptor will be rounded up to a multiple of 4
+ * bytes, by padding the string with 0x00 bytes if necessary.
+ */
+struct greybus_descriptor_string {
+       __u8    length;
+       __u8    id;
+       __u8    string[0];
+} __packed;
+
+/*
+ * An interface descriptor describes information about an interface as a whole,
+ * *not* the functions within it.
+ */
+struct greybus_descriptor_interface {
+       __u8    vendor_stringid;
+       __u8    product_stringid;
+       __u8    features;
+       __u8    pad;
+} __packed;
+
+/*
+ * A bundle descriptor defines an identification number and a class for
+ * each bundle.
+ *
+ * @id: Uniquely identifies a bundle within an interface; its sole purpose is
+ * to allow CPort descriptors to specify which bundle they are associated
+ * with.  The first bundle will have id 0, the second id 1, and so on.
+ *
+ * The largest CPort id associated with a bundle (defined by a
+ * CPort descriptor in the manifest) is used to determine how to
+ * encode the device id and module number in UniPro packets
+ * that use the bundle.
+ *
+ * @class: Used by the kernel to identify the functionality provided by the
+ * bundle; it is matched against the functionality supported by greybus
+ * drivers at probe time.  It should contain one of the values defined in
+ * 'enum greybus_class_type'.
+ *
+ */
+struct greybus_descriptor_bundle {
+       __u8    id;     /* interface-relative id (0..) */
+       __u8    class;
+       __u8    pad[2];
+} __packed;
+
+/*
+ * A CPort descriptor indicates the id of the bundle within the
+ * module it's associated with, along with the CPort id used to
+ * address the CPort.  The protocol id defines the format of messages
+ * exchanged using the CPort.
+ */
+struct greybus_descriptor_cport {
+       __le16  id;
+       __u8    bundle;
+       __u8    protocol_id;    /* enum greybus_protocol */
+} __packed;
+
+struct greybus_descriptor_header {
+       __le16  size;
+       __u8    type;           /* enum greybus_descriptor_type */
+       __u8    pad;
+} __packed;
+
+struct greybus_descriptor {
+       struct greybus_descriptor_header                header;
+       union {
+               struct greybus_descriptor_string        string;
+               struct greybus_descriptor_interface     interface;
+               struct greybus_descriptor_bundle        bundle;
+               struct greybus_descriptor_cport         cport;
+       };
+} __packed;
+
+struct greybus_manifest_header {
+       __le16  size;
+       __u8    version_major;
+       __u8    version_minor;
+} __packed;
+
+struct greybus_manifest {
+       struct greybus_manifest_header          header;
+       struct greybus_descriptor               descriptors[0];
+} __packed;
+
+#endif /* __GREYBUS_MANIFEST_H */
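[Editorial note: a minimal sketch (not the in-kernel parser) of walking the descriptor list that follows a manifest header, using the structures above; it assumes the usual kernel types and byte-order helpers (u8, le16_to_cpu).]

static void example_walk_manifest(const void *buf, size_t len)
{
	const struct greybus_manifest *manifest = buf;
	size_t size = le16_to_cpu(manifest->header.size);
	const u8 *p = (const u8 *)manifest->descriptors;
	const u8 *end = (const u8 *)buf + size;

	if (size < sizeof(manifest->header) || size > len)
		return;			/* malformed or truncated blob */

	while (p + sizeof(struct greybus_descriptor_header) <= end) {
		const struct greybus_descriptor *desc = (const void *)p;
		size_t desc_size = le16_to_cpu(desc->header.size);

		if (desc_size < sizeof(desc->header) ||
		    desc_size > (size_t)(end - p))
			break;		/* malformed descriptor */

		/*
		 * desc->header.type (enum greybus_descriptor_type) selects
		 * which member of the union is valid for this descriptor.
		 */
		p += desc_size;
	}
}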
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
new file mode 100644 (file)
index 0000000..6395783
--- /dev/null
@@ -0,0 +1,2268 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ *    its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_PROTOCOLS_H
+#define __GREYBUS_PROTOCOLS_H
+
+/* Fixed IDs for control/svc protocols */
+
+/* SVC switch-port device ids */
+#define GB_SVC_DEVICE_ID_SVC                   0
+#define GB_SVC_DEVICE_ID_AP                    1
+#define GB_SVC_DEVICE_ID_MIN                   2
+#define GB_SVC_DEVICE_ID_MAX                   31
+
+#define GB_SVC_CPORT_ID                                0
+#define GB_CONTROL_BUNDLE_ID                   0
+#define GB_CONTROL_CPORT_ID                    0
+
+
+/*
+ * All operation messages (both requests and responses) begin with
+ * a header that encodes the size of the message (header included).
+ * This header also contains a unique identifier, that associates a
+ * response message with its operation.  The header contains an
+ * operation type field, whose interpretation is dependent on what
+ * type of protocol is used over the connection.  The high bit
+ * (0x80) of the operation type field is used to indicate whether
+ * the message is a request (clear) or a response (set).
+ *
+ * Response messages include an additional result byte, which
+ * communicates the result of the corresponding request.  A zero
+ * result value means the operation completed successfully.  Any
+ * other value indicates an error; in this case, the payload of the
+ * response message (if any) is ignored.  The result byte must be
+ * zero in the header for a request message.
+ *
+ * The wire format for all numeric fields in the header is little
+ * endian.  Any operation-specific data begins immediately after the
+ * header.
+ */
+struct gb_operation_msg_hdr {
+       __le16  size;           /* Size in bytes of header + payload */
+       __le16  operation_id;   /* Operation unique id */
+       __u8    type;           /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
+       __u8    result;         /* Result of request (in responses only) */
+       __u8    pad[2];         /* must be zero (ignore when read) */
+} __packed;
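[Editorial note: illustrative only, not the in-kernel helper: filling in a response header for a received request according to the rules described above.]

static void example_fill_response_hdr(struct gb_operation_msg_hdr *rsp,
				      const struct gb_operation_msg_hdr *req,
				      size_t payload_size, __u8 result)
{
	rsp->size = cpu_to_le16(sizeof(*rsp) + payload_size);
	rsp->operation_id = req->operation_id;	/* pairs the response with its request */
	rsp->type = req->type | 0x80;		/* high bit set marks a response */
	rsp->result = result;			/* 0 means the request succeeded */
	rsp->pad[0] = 0;
	rsp->pad[1] = 0;
}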
+
+
+/* Generic request types */
+#define GB_REQUEST_TYPE_CPORT_SHUTDOWN         0x00
+#define GB_REQUEST_TYPE_INVALID                        0x7f
+
+struct gb_cport_shutdown_request {
+       __u8 phase;
+} __packed;
+
+
+/* Control Protocol */
+
+/* Greybus control request types */
+#define GB_CONTROL_TYPE_VERSION                        0x01
+#define GB_CONTROL_TYPE_PROBE_AP               0x02
+#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE      0x03
+#define GB_CONTROL_TYPE_GET_MANIFEST           0x04
+#define GB_CONTROL_TYPE_CONNECTED              0x05
+#define GB_CONTROL_TYPE_DISCONNECTED           0x06
+#define GB_CONTROL_TYPE_TIMESYNC_ENABLE                0x07
+#define GB_CONTROL_TYPE_TIMESYNC_DISABLE       0x08
+#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
+/*     Unused                                  0x0a */
+#define GB_CONTROL_TYPE_BUNDLE_VERSION         0x0b
+#define GB_CONTROL_TYPE_DISCONNECTING          0x0c
+#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT        0x0d
+#define GB_CONTROL_TYPE_MODE_SWITCH            0x0e
+#define GB_CONTROL_TYPE_BUNDLE_SUSPEND         0x0f
+#define GB_CONTROL_TYPE_BUNDLE_RESUME          0x10
+#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE      0x11
+#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE                0x12
+#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE           0x13
+#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE        0x14
+#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT   0x15
+
+struct gb_control_version_request {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+struct gb_control_version_response {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+struct gb_control_bundle_version_request {
+       __u8    bundle_id;
+} __packed;
+
+struct gb_control_bundle_version_response {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+/* Control protocol manifest get size request has no payload */
+struct gb_control_get_manifest_size_response {
+       __le16                  size;
+} __packed;
+
+/* Control protocol manifest get request has no payload */
+struct gb_control_get_manifest_response {
+       __u8                    data[0];
+} __packed;
+
+/* Control protocol [dis]connected request */
+struct gb_control_connected_request {
+       __le16                  cport_id;
+} __packed;
+
+struct gb_control_disconnecting_request {
+       __le16                  cport_id;
+} __packed;
+/* disconnecting response has no payload */
+
+struct gb_control_disconnected_request {
+       __le16                  cport_id;
+} __packed;
+/* Control protocol [dis]connected response has no payload */
+
+#define GB_TIMESYNC_MAX_STROBES                        0x04
+
+struct gb_control_timesync_enable_request {
+       __u8    count;
+       __le64  frame_time;
+       __le32  strobe_delay;
+       __le32  refclk;
+} __packed;
+/* timesync enable response has no payload */
+
+struct gb_control_timesync_authoritative_request {
+       __le64  frame_time[GB_TIMESYNC_MAX_STROBES];
+} __packed;
+/* timesync authoritative response has no payload */
+
+/* timesync get_last_event_request has no payload */
+struct gb_control_timesync_get_last_event_response {
+       __le64  frame_time;
+} __packed;
+
+/*
+ * All Bundle power management operations use the same request and response
+ * layout and status codes.
+ */
+
+#define GB_CONTROL_BUNDLE_PM_OK                0x00
+#define GB_CONTROL_BUNDLE_PM_INVAL     0x01
+#define GB_CONTROL_BUNDLE_PM_BUSY      0x02
+#define GB_CONTROL_BUNDLE_PM_FAIL      0x03
+#define GB_CONTROL_BUNDLE_PM_NA                0x04
+
+struct gb_control_bundle_pm_request {
+       __u8    bundle_id;
+} __packed;
+
+struct gb_control_bundle_pm_response {
+       __u8    status;
+} __packed;
+
+/*
+ * Interface Suspend Prepare and Deactivate Prepare operations use the same
+ * response layout and error codes. Define a single response structure and reuse
+ * it. Both operations have no payload.
+ */
+
+#define GB_CONTROL_INTF_PM_OK          0x00
+#define GB_CONTROL_INTF_PM_BUSY                0x01
+#define GB_CONTROL_INTF_PM_NA          0x02
+
+struct gb_control_intf_pm_response {
+       __u8    status;
+} __packed;
+
+/* APBridge protocol */
+
+/* request APB1 log */
+#define GB_APB_REQUEST_LOG                     0x02
+
+/* request to map a cport to bulk in and bulk out endpoints */
+#define GB_APB_REQUEST_EP_MAPPING              0x03
+
+/* request to get the number of cports available */
+#define GB_APB_REQUEST_CPORT_COUNT             0x04
+
+/* request to reset a cport state */
+#define GB_APB_REQUEST_RESET_CPORT             0x05
+
+/* request to time the latency of messages on a given cport */
+#define GB_APB_REQUEST_LATENCY_TAG_EN          0x06
+#define GB_APB_REQUEST_LATENCY_TAG_DIS         0x07
+
+/* request to control the CSI transmitter */
+#define GB_APB_REQUEST_CSI_TX_CONTROL          0x08
+
+/* request to control audio streaming */
+#define GB_APB_REQUEST_AUDIO_CONTROL           0x09
+
+/* TimeSync requests */
+#define GB_APB_REQUEST_TIMESYNC_ENABLE         0x0d
+#define GB_APB_REQUEST_TIMESYNC_DISABLE                0x0e
+#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE  0x0f
+#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
+
+/* requests to set Greybus CPort flags */
+#define GB_APB_REQUEST_CPORT_FLAGS             0x11
+
+/* ARPC request */
+#define GB_APB_REQUEST_ARPC_RUN                        0x12
+
+struct gb_apb_request_cport_flags {
+       __le32  flags;
+#define GB_APB_CPORT_FLAG_CONTROL              0x01
+#define GB_APB_CPORT_FLAG_HIGH_PRIO            0x02
+} __packed;
+
+
+/* Firmware Download Protocol */
+
+/* Request Types */
+#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE      0x01
+#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE     0x02
+#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE   0x03
+
+#define GB_FIRMWARE_TAG_MAX_SIZE               10
+
+/* firmware download find firmware request/response */
+struct gb_fw_download_find_firmware_request {
+       __u8                    firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_download_find_firmware_response {
+       __u8                    firmware_id;
+       __le32                  size;
+} __packed;
+
+/* firmware download fetch firmware request/response */
+struct gb_fw_download_fetch_firmware_request {
+       __u8                    firmware_id;
+       __le32                  offset;
+       __le32                  size;
+} __packed;
+
+struct gb_fw_download_fetch_firmware_response {
+       __u8                    data[0];
+} __packed;
+
+/* firmware download release firmware request */
+struct gb_fw_download_release_firmware_request {
+       __u8                    firmware_id;
+} __packed;
+/* firmware download release firmware response has no payload */
+
+
+/* Firmware Management Protocol */
+
+/* Request Types */
+#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION   0x01
+#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW   0x02
+#define GB_FW_MGMT_TYPE_LOADED_FW              0x03
+#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION     0x04
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE      0x05
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED     0x06
+
+#define GB_FW_LOAD_METHOD_UNIPRO               0x01
+#define GB_FW_LOAD_METHOD_INTERNAL             0x02
+
+#define GB_FW_LOAD_STATUS_FAILED               0x00
+#define GB_FW_LOAD_STATUS_UNVALIDATED          0x01
+#define GB_FW_LOAD_STATUS_VALIDATED            0x02
+#define GB_FW_LOAD_STATUS_VALIDATION_FAILED    0x03
+
+#define GB_FW_BACKEND_FW_STATUS_SUCCESS                0x01
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND      0x02
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH     0x03
+#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE     0x04
+#define GB_FW_BACKEND_FW_STATUS_INT            0x05
+#define GB_FW_BACKEND_FW_STATUS_RETRY          0x06
+#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED  0x07
+
+#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS           0x01
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE     0x02
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED     0x03
+#define GB_FW_BACKEND_VERSION_STATUS_RETRY             0x04
+#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT          0x05
+
+/* firmware management interface firmware version request has no payload */
+struct gb_fw_mgmt_interface_fw_version_response {
+       __u8                    firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+       __le16                  major;
+       __le16                  minor;
+} __packed;
+
+/* firmware management load and validate firmware request/response */
+struct gb_fw_mgmt_load_and_validate_fw_request {
+       __u8                    request_id;
+       __u8                    load_method;
+       __u8                    firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management load and validate firmware response has no payload */
+
+/* firmware management loaded firmware request */
+struct gb_fw_mgmt_loaded_fw_request {
+       __u8                    request_id;
+       __u8                    status;
+       __le16                  major;
+       __le16                  minor;
+} __packed;
+/* firmware management loaded firmware response has no payload */
+
+/* firmware management backend firmware version request/response */
+struct gb_fw_mgmt_backend_fw_version_request {
+       __u8                    firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_mgmt_backend_fw_version_response {
+       __le16                  major;
+       __le16                  minor;
+       __u8                    status;
+} __packed;
+
+/* firmware management backend firmware update request */
+struct gb_fw_mgmt_backend_fw_update_request {
+       __u8                    request_id;
+       __u8                    firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management backend firmware update response has no payload */
+
+/* firmware management backend firmware updated request */
+struct gb_fw_mgmt_backend_fw_updated_request {
+       __u8                    request_id;
+       __u8                    status;
+} __packed;
+/* firmware management backend firmware updated response has no payload */
+
+
+/* Component Authentication Protocol (CAP) */
+
+/* Request Types */
+#define GB_CAP_TYPE_GET_ENDPOINT_UID   0x01
+#define GB_CAP_TYPE_GET_IMS_CERTIFICATE        0x02
+#define GB_CAP_TYPE_AUTHENTICATE       0x03
+
+/* CAP get endpoint uid request has no payload */
+struct gb_cap_get_endpoint_uid_response {
+       __u8                    uid[8];
+} __packed;
+
+/* CAP get endpoint ims certificate request/response */
+struct gb_cap_get_ims_certificate_request {
+       __le32                  certificate_class;
+       __le32                  certificate_id;
+} __packed;
+
+struct gb_cap_get_ims_certificate_response {
+       __u8                    result_code;
+       __u8                    certificate[0];
+} __packed;
+
+/* CAP authenticate request/response */
+struct gb_cap_authenticate_request {
+       __le32                  auth_type;
+       __u8                    uid[8];
+       __u8                    challenge[32];
+} __packed;
+
+struct gb_cap_authenticate_response {
+       __u8                    result_code;
+       __u8                    response[64];
+       __u8                    signature[0];
+} __packed;
+
+
+/* Bootrom Protocol */
+
+/* Version of the Greybus bootrom protocol we support */
+#define GB_BOOTROM_VERSION_MAJOR               0x00
+#define GB_BOOTROM_VERSION_MINOR               0x01
+
+/* Greybus bootrom request types */
+#define GB_BOOTROM_TYPE_VERSION                        0x01
+#define GB_BOOTROM_TYPE_FIRMWARE_SIZE          0x02
+#define GB_BOOTROM_TYPE_GET_FIRMWARE           0x03
+#define GB_BOOTROM_TYPE_READY_TO_BOOT          0x04
+#define GB_BOOTROM_TYPE_AP_READY               0x05    /* Request with no-payload */
+#define GB_BOOTROM_TYPE_GET_VID_PID            0x06    /* Request with no-payload */
+
+/* Greybus bootrom boot stages */
+#define GB_BOOTROM_BOOT_STAGE_ONE              0x01 /* Reserved for the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_TWO              0x02 /* Bootrom package to be loaded by the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_THREE            0x03 /* Module personality package loaded by Stage 2 firmware */
+
+/* Greybus bootrom ready to boot status */
+#define GB_BOOTROM_BOOT_STATUS_INVALID         0x00 /* Firmware blob could not be validated */
+#define GB_BOOTROM_BOOT_STATUS_INSECURE                0x01 /* Firmware blob is valid but insecure */
+#define GB_BOOTROM_BOOT_STATUS_SECURE          0x02 /* Firmware blob is valid and secure */
+
+/* Max bootrom data fetch size in bytes */
+#define GB_BOOTROM_FETCH_MAX                   2000
+
+struct gb_bootrom_version_request {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+struct gb_bootrom_version_response {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+/* Bootrom protocol firmware size request/response */
+struct gb_bootrom_firmware_size_request {
+       __u8                    stage;
+} __packed;
+
+struct gb_bootrom_firmware_size_response {
+       __le32                  size;
+} __packed;
+
+/* Bootrom protocol get firmware request/response */
+struct gb_bootrom_get_firmware_request {
+       __le32                  offset;
+       __le32                  size;
+} __packed;
+
+struct gb_bootrom_get_firmware_response {
+       __u8                    data[0];
+} __packed;
+
+/* Bootrom protocol Ready to boot request */
+struct gb_bootrom_ready_to_boot_request {
+       __u8                    status;
+} __packed;
+/* Bootrom protocol Ready to boot response has no payload */
+
+/* Bootrom protocol get VID/PID request has no payload */
+struct gb_bootrom_get_vid_pid_response {
+       __le32                  vendor_id;
+       __le32                  product_id;
+} __packed;
+
+
+/* Power Supply */
+
+/* Greybus power supply request types */
+#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES              0x02
+#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION           0x03
+#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS      0x04
+#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY              0x05
+#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY              0x06
+#define GB_POWER_SUPPLY_TYPE_EVENT                     0x07
+
+/* Greybus power supply battery technologies types */
+#define GB_POWER_SUPPLY_TECH_UNKNOWN                   0x0000
+#define GB_POWER_SUPPLY_TECH_NiMH                      0x0001
+#define GB_POWER_SUPPLY_TECH_LION                      0x0002
+#define GB_POWER_SUPPLY_TECH_LIPO                      0x0003
+#define GB_POWER_SUPPLY_TECH_LiFe                      0x0004
+#define GB_POWER_SUPPLY_TECH_NiCd                      0x0005
+#define GB_POWER_SUPPLY_TECH_LiMn                      0x0006
+
+/* Greybus power supply types */
+#define GB_POWER_SUPPLY_UNKNOWN_TYPE                   0x0000
+#define GB_POWER_SUPPLY_BATTERY_TYPE                   0x0001
+#define GB_POWER_SUPPLY_UPS_TYPE                       0x0002
+#define GB_POWER_SUPPLY_MAINS_TYPE                     0x0003
+#define GB_POWER_SUPPLY_USB_TYPE                       0x0004
+#define GB_POWER_SUPPLY_USB_DCP_TYPE                   0x0005
+#define GB_POWER_SUPPLY_USB_CDP_TYPE                   0x0006
+#define GB_POWER_SUPPLY_USB_ACA_TYPE                   0x0007
+
+/* Greybus power supply health values */
+#define GB_POWER_SUPPLY_HEALTH_UNKNOWN                 0x0000
+#define GB_POWER_SUPPLY_HEALTH_GOOD                    0x0001
+#define GB_POWER_SUPPLY_HEALTH_OVERHEAT                        0x0002
+#define GB_POWER_SUPPLY_HEALTH_DEAD                    0x0003
+#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE             0x0004
+#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE          0x0005
+#define GB_POWER_SUPPLY_HEALTH_COLD                    0x0006
+#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE   0x0007
+#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE     0x0008
+
+/* Greybus power supply status values */
+#define GB_POWER_SUPPLY_STATUS_UNKNOWN                 0x0000
+#define GB_POWER_SUPPLY_STATUS_CHARGING                        0x0001
+#define GB_POWER_SUPPLY_STATUS_DISCHARGING             0x0002
+#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING            0x0003
+#define GB_POWER_SUPPLY_STATUS_FULL                    0x0004
+
+/* Greybus power supply capacity level values */
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN         0x0000
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL                0x0001
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW             0x0002
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL          0x0003
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH            0x0004
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL            0x0005
+
+/* Greybus power supply scope values */
+#define GB_POWER_SUPPLY_SCOPE_UNKNOWN                  0x0000
+#define GB_POWER_SUPPLY_SCOPE_SYSTEM                   0x0001
+#define GB_POWER_SUPPLY_SCOPE_DEVICE                   0x0002
+
+struct gb_power_supply_get_supplies_response {
+       __u8    supplies_count;
+} __packed;
+
+struct gb_power_supply_get_description_request {
+       __u8    psy_id;
+} __packed;
+
+struct gb_power_supply_get_description_response {
+       __u8    manufacturer[32];
+       __u8    model[32];
+       __u8    serial_number[32];
+       __le16  type;
+       __u8    properties_count;
+} __packed;
+
+struct gb_power_supply_props_desc {
+       __u8    property;
+#define GB_POWER_SUPPLY_PROP_STATUS                            0x00
+#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE                       0x01
+#define GB_POWER_SUPPLY_PROP_HEALTH                            0x02
+#define GB_POWER_SUPPLY_PROP_PRESENT                           0x03
+#define GB_POWER_SUPPLY_PROP_ONLINE                            0x04
+#define GB_POWER_SUPPLY_PROP_AUTHENTIC                         0x05
+#define GB_POWER_SUPPLY_PROP_TECHNOLOGY                                0x06
+#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT                       0x07
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX                       0x08
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN                       0x09
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN                        0x0A
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN                        0x0B
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW                       0x0C
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG                       0x0D
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV                       0x0E
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT                      0x0F
+#define GB_POWER_SUPPLY_PROP_CURRENT_MAX                       0x10
+#define GB_POWER_SUPPLY_PROP_CURRENT_NOW                       0x11
+#define GB_POWER_SUPPLY_PROP_CURRENT_AVG                       0x12
+#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT                      0x13
+#define GB_POWER_SUPPLY_PROP_POWER_NOW                         0x14
+#define GB_POWER_SUPPLY_PROP_POWER_AVG                         0x15
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN                        0x16
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN               0x17
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL                       0x18
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY                      0x19
+#define GB_POWER_SUPPLY_PROP_CHARGE_NOW                                0x1A
+#define GB_POWER_SUPPLY_PROP_CHARGE_AVG                                0x1B
+#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER                    0x1C
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT           0x1D
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX       0x1E
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE           0x1F
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX       0x20
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT              0x21
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX          0x22
+#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT               0x23
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN                        0x24
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN               0x25
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL                       0x26
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY                      0x27
+#define GB_POWER_SUPPLY_PROP_ENERGY_NOW                                0x28
+#define GB_POWER_SUPPLY_PROP_ENERGY_AVG                                0x29
+#define GB_POWER_SUPPLY_PROP_CAPACITY                          0x2A
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN                        0x2B
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX                        0x2C
+#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL                    0x2D
+#define GB_POWER_SUPPLY_PROP_TEMP                              0x2E
+#define GB_POWER_SUPPLY_PROP_TEMP_MAX                          0x2F
+#define GB_POWER_SUPPLY_PROP_TEMP_MIN                          0x30
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN                    0x31
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX                    0x32
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT                      0x33
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN            0x34
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX            0x35
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW                 0x36
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG                 0x37
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW                  0x38
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG                  0x39
+#define GB_POWER_SUPPLY_PROP_TYPE                              0x3A
+#define GB_POWER_SUPPLY_PROP_SCOPE                             0x3B
+#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT               0x3C
+#define GB_POWER_SUPPLY_PROP_CALIBRATE                         0x3D
+       __u8    is_writeable;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_request {
+       __u8    psy_id;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_response {
+       __u8    properties_count;
+       struct gb_power_supply_props_desc props[];
+} __packed;
+
+struct gb_power_supply_get_property_request {
+       __u8    psy_id;
+       __u8    property;
+} __packed;
+
+struct gb_power_supply_get_property_response {
+       __le32  prop_val;
+};
+
+struct gb_power_supply_set_property_request {
+       __u8    psy_id;
+       __u8    property;
+       __le32  prop_val;
+} __packed;
+
+struct gb_power_supply_event_request {
+       __u8    psy_id;
+       __u8    event;
+#define GB_POWER_SUPPLY_UPDATE         0x01
+} __packed;
+
+
+/* HID */
+
+/* Greybus HID operation types */
+#define GB_HID_TYPE_GET_DESC           0x02
+#define GB_HID_TYPE_GET_REPORT_DESC    0x03
+#define GB_HID_TYPE_PWR_ON             0x04
+#define GB_HID_TYPE_PWR_OFF            0x05
+#define GB_HID_TYPE_GET_REPORT         0x06
+#define GB_HID_TYPE_SET_REPORT         0x07
+#define GB_HID_TYPE_IRQ_EVENT          0x08
+
+/* Report type */
+#define GB_HID_INPUT_REPORT            0
+#define GB_HID_OUTPUT_REPORT           1
+#define GB_HID_FEATURE_REPORT          2
+
+/* Different request/response structures */
+/* HID get descriptor response */
+struct gb_hid_desc_response {
+       __u8                            bLength;
+       __le16                          wReportDescLength;
+       __le16                          bcdHID;
+       __le16                          wProductID;
+       __le16                          wVendorID;
+       __u8                            bCountryCode;
+} __packed;
+
+/* HID get report request/response */
+struct gb_hid_get_report_request {
+       __u8                            report_type;
+       __u8                            report_id;
+} __packed;
+
+/* HID set report request */
+struct gb_hid_set_report_request {
+       __u8                            report_type;
+       __u8                            report_id;
+       __u8                            report[0];
+} __packed;
+
+/* HID input report request, via interrupt pipe */
+struct gb_hid_input_report_request {
+       __u8                            report[0];
+} __packed;
+
+
+/* I2C */
+
+/* Greybus i2c request types */
+#define GB_I2C_TYPE_FUNCTIONALITY      0x02
+#define GB_I2C_TYPE_TRANSFER           0x05
+
+/* functionality request has no payload */
+struct gb_i2c_functionality_response {
+       __le32  functionality;
+} __packed;
+
+/*
+ * Outgoing data immediately follows the op count and ops array.
+ * The data for each write (master -> slave) op in the array is sent
+ * in order, with no (e.g. pad) bytes separating them.
+ *
+ * Short reads cause the entire transfer request to fail, so the response
+ * payload consists only of the bytes read, and the number of bytes is
+ * exactly what was specified in the corresponding op.  Like
+ * outgoing data, the incoming data is in order and contiguous.
+ */
+struct gb_i2c_transfer_op {
+       __le16  addr;
+       __le16  flags;
+       __le16  size;
+} __packed;
+
+struct gb_i2c_transfer_request {
+       __le16                          op_count;
+       struct gb_i2c_transfer_op       ops[0];         /* op_count of these */
+} __packed;
+struct gb_i2c_transfer_response {
+       __u8                            data[0];        /* inbound data */
+} __packed;
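[Editorial note: illustrative only: laying out a write-then-read transfer request as described above, with the write payload appended directly after the ops array; I2C_M_RD (from <linux/i2c.h>) is assumed here to be the per-op read flag.]

static size_t example_build_i2c_write_read(void *buf, __u16 addr,
					   const __u8 *wbuf, __u16 wlen,
					   __u16 rlen)
{
	struct gb_i2c_transfer_request *request = buf;
	__u8 *data;

	request->op_count = cpu_to_le16(2);

	request->ops[0].addr = cpu_to_le16(addr);	/* write op */
	request->ops[0].flags = cpu_to_le16(0);
	request->ops[0].size = cpu_to_le16(wlen);

	request->ops[1].addr = cpu_to_le16(addr);	/* read op */
	request->ops[1].flags = cpu_to_le16(I2C_M_RD);
	request->ops[1].size = cpu_to_le16(rlen);

	/* Outgoing data for the write op immediately follows the ops array. */
	data = (__u8 *)&request->ops[2];
	memcpy(data, wbuf, wlen);

	return sizeof(*request) + 2 * sizeof(struct gb_i2c_transfer_op) + wlen;
}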
+
+
+/* GPIO */
+
+/* Greybus GPIO request types */
+#define GB_GPIO_TYPE_LINE_COUNT                0x02
+#define GB_GPIO_TYPE_ACTIVATE          0x03
+#define GB_GPIO_TYPE_DEACTIVATE                0x04
+#define GB_GPIO_TYPE_GET_DIRECTION     0x05
+#define GB_GPIO_TYPE_DIRECTION_IN      0x06
+#define GB_GPIO_TYPE_DIRECTION_OUT     0x07
+#define GB_GPIO_TYPE_GET_VALUE         0x08
+#define GB_GPIO_TYPE_SET_VALUE         0x09
+#define GB_GPIO_TYPE_SET_DEBOUNCE      0x0a
+#define GB_GPIO_TYPE_IRQ_TYPE          0x0b
+#define GB_GPIO_TYPE_IRQ_MASK          0x0c
+#define GB_GPIO_TYPE_IRQ_UNMASK                0x0d
+#define GB_GPIO_TYPE_IRQ_EVENT         0x0e
+
+#define GB_GPIO_IRQ_TYPE_NONE          0x00
+#define GB_GPIO_IRQ_TYPE_EDGE_RISING   0x01
+#define GB_GPIO_IRQ_TYPE_EDGE_FALLING  0x02
+#define GB_GPIO_IRQ_TYPE_EDGE_BOTH     0x03
+#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH    0x04
+#define GB_GPIO_IRQ_TYPE_LEVEL_LOW     0x08
+
+/* line count request has no payload */
+struct gb_gpio_line_count_response {
+       __u8    count;
+} __packed;
+
+struct gb_gpio_activate_request {
+       __u8    which;
+} __packed;
+/* activate response has no payload */
+
+struct gb_gpio_deactivate_request {
+       __u8    which;
+} __packed;
+/* deactivate response has no payload */
+
+struct gb_gpio_get_direction_request {
+       __u8    which;
+} __packed;
+struct gb_gpio_get_direction_response {
+       __u8    direction;
+} __packed;
+
+struct gb_gpio_direction_in_request {
+       __u8    which;
+} __packed;
+/* direction in response has no payload */
+
+struct gb_gpio_direction_out_request {
+       __u8    which;
+       __u8    value;
+} __packed;
+/* direction out response has no payload */
+
+struct gb_gpio_get_value_request {
+       __u8    which;
+} __packed;
+struct gb_gpio_get_value_response {
+       __u8    value;
+} __packed;
+
+struct gb_gpio_set_value_request {
+       __u8    which;
+       __u8    value;
+} __packed;
+/* set value response has no payload */
+
+struct gb_gpio_set_debounce_request {
+       __u8    which;
+       __le16  usec;
+} __packed;
+/* debounce response has no payload */
+
+struct gb_gpio_irq_type_request {
+       __u8    which;
+       __u8    type;
+} __packed;
+/* irq type response has no payload */
+
+struct gb_gpio_irq_mask_request {
+       __u8    which;
+} __packed;
+/* irq mask response has no payload */
+
+struct gb_gpio_irq_unmask_request {
+       __u8    which;
+} __packed;
+/* irq unmask response has no payload */
+
+/* irq event requests originate on another module and are handled on the AP */
+struct gb_gpio_irq_event_request {
+       __u8    which;
+} __packed;
+/* irq event has no response */
+
+
+/* PWM */
+
+/* Greybus PWM operation types */
+#define GB_PWM_TYPE_PWM_COUNT          0x02
+#define GB_PWM_TYPE_ACTIVATE           0x03
+#define GB_PWM_TYPE_DEACTIVATE         0x04
+#define GB_PWM_TYPE_CONFIG             0x05
+#define GB_PWM_TYPE_POLARITY           0x06
+#define GB_PWM_TYPE_ENABLE             0x07
+#define GB_PWM_TYPE_DISABLE            0x08
+
+/* pwm count request has no payload */
+struct gb_pwm_count_response {
+       __u8    count;
+} __packed;
+
+struct gb_pwm_activate_request {
+       __u8    which;
+} __packed;
+
+struct gb_pwm_deactivate_request {
+       __u8    which;
+} __packed;
+
+struct gb_pwm_config_request {
+       __u8    which;
+       __le32  duty;
+       __le32  period;
+} __packed;
+
+struct gb_pwm_polarity_request {
+       __u8    which;
+       __u8    polarity;
+} __packed;
+
+struct gb_pwm_enable_request {
+       __u8    which;
+} __packed;
+
+struct gb_pwm_disable_request {
+       __u8    which;
+} __packed;
+
+/* SPI */
+
+/* Should match up with modes in linux/spi/spi.h */
+#define GB_SPI_MODE_CPHA               0x01            /* clock phase */
+#define GB_SPI_MODE_CPOL               0x02            /* clock polarity */
+#define GB_SPI_MODE_MODE_0             (0|0)           /* (original MicroWire) */
+#define GB_SPI_MODE_MODE_1             (0|GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_MODE_2             (GB_SPI_MODE_CPOL|0)
+#define GB_SPI_MODE_MODE_3             (GB_SPI_MODE_CPOL|GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_CS_HIGH            0x04            /* chipselect active high? */
+#define GB_SPI_MODE_LSB_FIRST          0x08            /* per-word bits-on-wire */
+#define GB_SPI_MODE_3WIRE              0x10            /* SI/SO signals shared */
+#define GB_SPI_MODE_LOOP               0x20            /* loopback mode */
+#define GB_SPI_MODE_NO_CS              0x40            /* 1 dev/bus, no chipselect */
+#define GB_SPI_MODE_READY              0x80            /* slave pulls low to pause */
+
+/* Should match up with flags in linux/spi/spi.h */
+#define GB_SPI_FLAG_HALF_DUPLEX                BIT(0)          /* can't do full duplex */
+#define GB_SPI_FLAG_NO_RX              BIT(1)          /* can't do buffer read */
+#define GB_SPI_FLAG_NO_TX              BIT(2)          /* can't do buffer write */
+
+/* Greybus spi operation types */
+#define GB_SPI_TYPE_MASTER_CONFIG      0x02
+#define GB_SPI_TYPE_DEVICE_CONFIG      0x03
+#define GB_SPI_TYPE_TRANSFER           0x04
+
+/* mode request has no payload */
+struct gb_spi_master_config_response {
+       __le32  bits_per_word_mask;
+       __le32  min_speed_hz;
+       __le32  max_speed_hz;
+       __le16  mode;
+       __le16  flags;
+       __u8    num_chipselect;
+} __packed;
+
+struct gb_spi_device_config_request {
+       __u8    chip_select;
+} __packed;
+
+struct gb_spi_device_config_response {
+       __le16  mode;
+       __u8    bits_per_word;
+       __le32  max_speed_hz;
+       __u8    device_type;
+#define GB_SPI_SPI_DEV         0x00
+#define GB_SPI_SPI_NOR         0x01
+#define GB_SPI_SPI_MODALIAS    0x02
+       __u8    name[32];
+} __packed;
+
+/**
+ * struct gb_spi_transfer - a read/write buffer pair
+ * @speed_hz: Select a speed other than the device default for this transfer. If
+ *     0 the default (from @spi_device) is used.
+ * @len: size of rx and tx buffers (in bytes)
+ * @delay_usecs: microseconds to delay after this transfer before (optionally)
+ *     changing the chipselect status, then starting the next transfer or
+ *     completing this spi_message.
+ * @cs_change: affects chipselect after this transfer completes
+ * @bits_per_word: select a bits_per_word other than the device default for this
+ *     transfer. If 0 the default (from @spi_device) is used.
+ */
+struct gb_spi_transfer {
+       __le32          speed_hz;
+       __le32          len;
+       __le16          delay_usecs;
+       __u8            cs_change;
+       __u8            bits_per_word;
+       __u8            xfer_flags;
+#define GB_SPI_XFER_READ       0x01
+#define GB_SPI_XFER_WRITE      0x02
+#define GB_SPI_XFER_INPROGRESS 0x04
+} __packed;
+
+struct gb_spi_transfer_request {
+       __u8                    chip_select;    /* of the spi device */
+       __u8                    mode;           /* of the spi device */
+       __le16                  count;
+       struct gb_spi_transfer  transfers[0];   /* count of these */
+} __packed;
+
+struct gb_spi_transfer_response {
+       __u8                    data[0];        /* inbound data */
+} __packed;
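[Editorial note: illustrative only: a single full-duplex transfer descriptor filled in from the fields documented above, with write and read in one transfer.]

static void example_fill_spi_transfer(struct gb_spi_transfer *xfer, __u32 len)
{
	xfer->speed_hz = cpu_to_le32(0);	/* 0: use the device default */
	xfer->len = cpu_to_le32(len);
	xfer->delay_usecs = cpu_to_le16(0);
	xfer->cs_change = 0;
	xfer->bits_per_word = 0;		/* 0: use the device default */
	xfer->xfer_flags = GB_SPI_XFER_READ | GB_SPI_XFER_WRITE;
}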
+
+/* Version of the Greybus SVC protocol we support */
+#define GB_SVC_VERSION_MAJOR           0x00
+#define GB_SVC_VERSION_MINOR           0x01
+
+/* Greybus SVC request types */
+#define GB_SVC_TYPE_PROTOCOL_VERSION           0x01
+#define GB_SVC_TYPE_SVC_HELLO                  0x02
+#define GB_SVC_TYPE_INTF_DEVICE_ID             0x03
+#define GB_SVC_TYPE_INTF_RESET                 0x06
+#define GB_SVC_TYPE_CONN_CREATE                        0x07
+#define GB_SVC_TYPE_CONN_DESTROY               0x08
+#define GB_SVC_TYPE_DME_PEER_GET               0x09
+#define GB_SVC_TYPE_DME_PEER_SET               0x0a
+#define GB_SVC_TYPE_ROUTE_CREATE               0x0b
+#define GB_SVC_TYPE_ROUTE_DESTROY              0x0c
+#define GB_SVC_TYPE_TIMESYNC_ENABLE            0x0d
+#define GB_SVC_TYPE_TIMESYNC_DISABLE           0x0e
+#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE     0x0f
+#define GB_SVC_TYPE_INTF_SET_PWRM              0x10
+#define GB_SVC_TYPE_INTF_EJECT                 0x11
+#define GB_SVC_TYPE_PING                       0x13
+#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET      0x14
+#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET      0x15
+#define GB_SVC_TYPE_PWRMON_SAMPLE_GET          0x16
+#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET     0x17
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
+#define GB_SVC_TYPE_TIMESYNC_PING              0x1a
+#define GB_SVC_TYPE_MODULE_INSERTED            0x1f
+#define GB_SVC_TYPE_MODULE_REMOVED             0x20
+#define GB_SVC_TYPE_INTF_VSYS_ENABLE           0x21
+#define GB_SVC_TYPE_INTF_VSYS_DISABLE          0x22
+#define GB_SVC_TYPE_INTF_REFCLK_ENABLE         0x23
+#define GB_SVC_TYPE_INTF_REFCLK_DISABLE                0x24
+#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE         0x25
+#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE                0x26
+#define GB_SVC_TYPE_INTF_ACTIVATE              0x27
+#define GB_SVC_TYPE_INTF_RESUME                        0x28
+#define GB_SVC_TYPE_INTF_MAILBOX_EVENT         0x29
+#define GB_SVC_TYPE_INTF_OOPS                  0x2a
+
+/* Greybus SVC protocol status values */
+#define GB_SVC_OP_SUCCESS                      0x00
+#define GB_SVC_OP_UNKNOWN_ERROR                        0x01
+#define GB_SVC_INTF_NOT_DETECTED               0x02
+#define GB_SVC_INTF_NO_UPRO_LINK               0x03
+#define GB_SVC_INTF_UPRO_NOT_DOWN              0x04
+#define GB_SVC_INTF_UPRO_NOT_HIBERNATED                0x05
+#define GB_SVC_INTF_NO_V_SYS                   0x06
+#define GB_SVC_INTF_V_CHG                      0x07
+#define GB_SVC_INTF_WAKE_BUSY                  0x08
+#define GB_SVC_INTF_NO_REFCLK                  0x09
+#define GB_SVC_INTF_RELEASING                  0x0a
+#define GB_SVC_INTF_NO_ORDER                   0x0b
+#define GB_SVC_INTF_MBOX_SET                   0x0c
+#define GB_SVC_INTF_BAD_MBOX                   0x0d
+#define GB_SVC_INTF_OP_TIMEOUT                 0x0e
+#define GB_SVC_PWRMON_OP_NOT_PRESENT           0x0f
+
+struct gb_svc_version_request {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+struct gb_svc_version_response {
+       __u8    major;
+       __u8    minor;
+} __packed;
+
+/* SVC protocol hello request */
+struct gb_svc_hello_request {
+       __le16                  endo_id;
+       __u8                    interface_id;
+} __packed;
+/* hello response has no payload */
+
+struct gb_svc_intf_device_id_request {
+       __u8    intf_id;
+       __u8    device_id;
+} __packed;
+/* device id response has no payload */
+
+struct gb_svc_intf_reset_request {
+       __u8    intf_id;
+} __packed;
+/* interface reset response has no payload */
+
+struct gb_svc_intf_eject_request {
+       __u8    intf_id;
+} __packed;
+/* interface eject response has no payload */
+
+struct gb_svc_conn_create_request {
+       __u8    intf1_id;
+       __le16  cport1_id;
+       __u8    intf2_id;
+       __le16  cport2_id;
+       __u8    tc;
+       __u8    flags;
+} __packed;
+/* connection create response has no payload */
+
+struct gb_svc_conn_destroy_request {
+       __u8    intf1_id;
+       __le16  cport1_id;
+       __u8    intf2_id;
+       __le16  cport2_id;
+} __packed;
+/* connection destroy response has no payload */
+
+struct gb_svc_dme_peer_get_request {
+       __u8    intf_id;
+       __le16  attr;
+       __le16  selector;
+} __packed;
+
+struct gb_svc_dme_peer_get_response {
+       __le16  result_code;
+       __le32  attr_value;
+} __packed;
+
+struct gb_svc_dme_peer_set_request {
+       __u8    intf_id;
+       __le16  attr;
+       __le16  selector;
+       __le32  value;
+} __packed;
+
+struct gb_svc_dme_peer_set_response {
+       __le16  result_code;
+} __packed;
+
+/* Greybus init-status values, currently retrieved using DME peer gets. */
+#define GB_INIT_SPI_BOOT_STARTED                       0x02
+#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED              0x03
+#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED            0x04
+#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED            0x06
+#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED   0x09
+#define GB_INIT_S2_LOADER_BOOT_STARTED                 0x0D
+
+struct gb_svc_route_create_request {
+       __u8    intf1_id;
+       __u8    dev1_id;
+       __u8    intf2_id;
+       __u8    dev2_id;
+} __packed;
+/* route create response has no payload */
+
+struct gb_svc_route_destroy_request {
+       __u8    intf1_id;
+       __u8    intf2_id;
+} __packed;
+/* route destroy response has no payload */
+
+/* used for svc_intf_vsys_{enable,disable} */
+struct gb_svc_intf_vsys_request {
+       __u8    intf_id;
+} __packed;
+
+struct gb_svc_intf_vsys_response {
+       __u8    result_code;
+#define GB_SVC_INTF_VSYS_OK                            0x00
+       /* 0x01 is reserved */
+#define GB_SVC_INTF_VSYS_FAIL                          0x02
+} __packed;
+
+/* used for svc_intf_refclk_{enable,disable} */
+struct gb_svc_intf_refclk_request {
+       __u8    intf_id;
+} __packed;
+
+struct gb_svc_intf_refclk_response {
+       __u8    result_code;
+#define GB_SVC_INTF_REFCLK_OK                          0x00
+       /* 0x01 is reserved */
+#define GB_SVC_INTF_REFCLK_FAIL                                0x02
+} __packed;
+
+/* used for svc_intf_unipro_{enable,disable} */
+struct gb_svc_intf_unipro_request {
+       __u8    intf_id;
+} __packed;
+
+struct gb_svc_intf_unipro_response {
+       __u8    result_code;
+#define GB_SVC_INTF_UNIPRO_OK                          0x00
+       /* 0x01 is reserved */
+#define GB_SVC_INTF_UNIPRO_FAIL                                0x02
+#define GB_SVC_INTF_UNIPRO_NOT_OFF                     0x03
+} __packed;
+
+struct gb_svc_timesync_enable_request {
+       __u8    count;
+       __le64  frame_time;
+       __le32  strobe_delay;
+       __le32  refclk;
+} __packed;
+/* timesync enable response has no payload */
+
+/* timesync authoritative request has no payload */
+struct gb_svc_timesync_authoritative_response {
+       __le64  frame_time[GB_TIMESYNC_MAX_STROBES];
+};
+
+struct gb_svc_timesync_wake_pins_acquire_request {
+       __le32  strobe_mask;
+};
+
+/* timesync wake pins acquire response has no payload */
+
+/* timesync wake pins release request has no payload */
+/* timesync wake pins release response has no payload */
+
+/* timesync svc ping request has no payload */
+struct gb_svc_timesync_ping_response {
+       __le64  frame_time;
+} __packed;
+
+#define GB_SVC_UNIPRO_FAST_MODE                        0x01
+#define GB_SVC_UNIPRO_SLOW_MODE                        0x02
+#define GB_SVC_UNIPRO_FAST_AUTO_MODE           0x04
+#define GB_SVC_UNIPRO_SLOW_AUTO_MODE           0x05
+#define GB_SVC_UNIPRO_MODE_UNCHANGED           0x07
+#define GB_SVC_UNIPRO_HIBERNATE_MODE           0x11
+#define GB_SVC_UNIPRO_OFF_MODE                 0x12
+
+#define GB_SVC_SMALL_AMPLITUDE          0x01
+#define GB_SVC_LARGE_AMPLITUDE          0x02
+
+#define GB_SVC_NO_DE_EMPHASIS           0x00
+#define GB_SVC_SMALL_DE_EMPHASIS        0x01
+#define GB_SVC_LARGE_DE_EMPHASIS        0x02
+
+#define GB_SVC_PWRM_RXTERMINATION              0x01
+#define GB_SVC_PWRM_TXTERMINATION              0x02
+#define GB_SVC_PWRM_LINE_RESET                 0x04
+#define GB_SVC_PWRM_SCRAMBLING                 0x20
+
+#define GB_SVC_PWRM_QUIRK_HSSER                        0x00000001
+
+#define GB_SVC_UNIPRO_HS_SERIES_A              0x01
+#define GB_SVC_UNIPRO_HS_SERIES_B              0x02
+
+#define GB_SVC_SETPWRM_PWR_OK           0x00
+#define GB_SVC_SETPWRM_PWR_LOCAL        0x01
+#define GB_SVC_SETPWRM_PWR_REMOTE       0x02
+#define GB_SVC_SETPWRM_PWR_BUSY         0x03
+#define GB_SVC_SETPWRM_PWR_ERROR_CAP    0x04
+#define GB_SVC_SETPWRM_PWR_FATAL_ERROR  0x05
+
+struct gb_svc_l2_timer_cfg {
+       __le16 tsb_fc0_protection_timeout;
+       __le16 tsb_tc0_replay_timeout;
+       __le16 tsb_afc0_req_timeout;
+       __le16 tsb_fc1_protection_timeout;
+       __le16 tsb_tc1_replay_timeout;
+       __le16 tsb_afc1_req_timeout;
+       __le16 reserved_for_tc2[3];
+       __le16 reserved_for_tc3[3];
+} __packed;
+
+struct gb_svc_intf_set_pwrm_request {
+       __u8    intf_id;
+       __u8    hs_series;
+       __u8    tx_mode;
+       __u8    tx_gear;
+       __u8    tx_nlanes;
+       __u8    tx_amplitude;
+       __u8    tx_hs_equalizer;
+       __u8    rx_mode;
+       __u8    rx_gear;
+       __u8    rx_nlanes;
+       __u8    flags;
+       __le32  quirks;
+       struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
+} __packed;
+
+struct gb_svc_intf_set_pwrm_response {
+       __u8    result_code;
+} __packed;
+
+struct gb_svc_key_event_request {
+       __le16  key_code;
+#define GB_KEYCODE_ARA         0x00
+
+       __u8    key_event;
+#define GB_SVC_KEY_RELEASED    0x00
+#define GB_SVC_KEY_PRESSED     0x01
+} __packed;
+
+#define GB_SVC_PWRMON_MAX_RAIL_COUNT           254
+
+struct gb_svc_pwrmon_rail_count_get_response {
+       __u8    rail_count;
+} __packed;
+
+#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE                32
+
+struct gb_svc_pwrmon_rail_names_get_response {
+       __u8    status;
+       __u8    name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+} __packed;
+
+#define GB_SVC_PWRMON_TYPE_CURR                        0x01
+#define GB_SVC_PWRMON_TYPE_VOL                 0x02
+#define GB_SVC_PWRMON_TYPE_PWR                 0x03
+
+#define GB_SVC_PWRMON_GET_SAMPLE_OK            0x00
+#define GB_SVC_PWRMON_GET_SAMPLE_INVAL         0x01
+#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP                0x02
+#define GB_SVC_PWRMON_GET_SAMPLE_HWERR         0x03
+
+struct gb_svc_pwrmon_sample_get_request {
+       __u8    rail_id;
+       __u8    measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_sample_get_response {
+       __u8    result;
+       __le32  measurement;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_request {
+       __u8    intf_id;
+       __u8    measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_response {
+       __u8    result;
+       __le32  measurement;
+} __packed;
+
+#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
+
+struct gb_svc_module_inserted_request {
+       __u8    primary_intf_id;
+       __u8    intf_count;
+       __le16  flags;
+} __packed;
+/* module_inserted response has no payload */
+
+struct gb_svc_module_removed_request {
+       __u8    primary_intf_id;
+} __packed;
+/* module_removed response has no payload */
+
+struct gb_svc_intf_activate_request {
+       __u8    intf_id;
+} __packed;
+
+#define GB_SVC_INTF_TYPE_UNKNOWN               0x00
+#define GB_SVC_INTF_TYPE_DUMMY                 0x01
+#define GB_SVC_INTF_TYPE_UNIPRO                        0x02
+#define GB_SVC_INTF_TYPE_GREYBUS               0x03
+
+struct gb_svc_intf_activate_response {
+       __u8    status;
+       __u8    intf_type;
+} __packed;
+
+struct gb_svc_intf_resume_request {
+       __u8    intf_id;
+} __packed;
+
+struct gb_svc_intf_resume_response {
+       __u8    status;
+} __packed;
+
+#define GB_SVC_INTF_MAILBOX_NONE               0x00
+#define GB_SVC_INTF_MAILBOX_AP                 0x01
+#define GB_SVC_INTF_MAILBOX_GREYBUS            0x02
+
+struct gb_svc_intf_mailbox_event_request {
+       __u8    intf_id;
+       __le16  result_code;
+       __le32  mailbox;
+} __packed;
+/* intf_mailbox_event response has no payload */
+
+struct gb_svc_intf_oops_request {
+       __u8    intf_id;
+       __u8    reason;
+} __packed;
+/* intf_oops response has no payload */
+
+
+/* RAW */
+
+/* Greybus raw request types */
+#define        GB_RAW_TYPE_SEND                        0x02
+
+struct gb_raw_send_request {
+       __le32  len;
+       __u8    data[0];
+} __packed;
+
+
+/* UART */
+
+/* Greybus UART operation types */
+#define GB_UART_TYPE_SEND_DATA                 0x02
+#define GB_UART_TYPE_RECEIVE_DATA              0x03    /* Unsolicited data */
+#define GB_UART_TYPE_SET_LINE_CODING           0x04
+#define GB_UART_TYPE_SET_CONTROL_LINE_STATE    0x05
+#define GB_UART_TYPE_SEND_BREAK                        0x06
+#define GB_UART_TYPE_SERIAL_STATE              0x07    /* Unsolicited data */
+#define GB_UART_TYPE_RECEIVE_CREDITS           0x08
+#define GB_UART_TYPE_FLUSH_FIFOS               0x09
+
+/* Represents data from AP -> Module */
+struct gb_uart_send_data_request {
+       __le16  size;
+       __u8    data[0];
+} __packed;
+
+/* recv-data-request flags */
+#define GB_UART_RECV_FLAG_FRAMING              0x01    /* Framing error */
+#define GB_UART_RECV_FLAG_PARITY               0x02    /* Parity error */
+#define GB_UART_RECV_FLAG_OVERRUN              0x04    /* Overrun error */
+#define GB_UART_RECV_FLAG_BREAK                        0x08    /* Break */
+
+/* Represents data from Module -> AP */
+struct gb_uart_recv_data_request {
+       __le16  size;
+       __u8    flags;
+       __u8    data[0];
+} __packed;
+
+struct gb_uart_receive_credits_request {
+       __le16  count;
+} __packed;
+
+struct gb_uart_set_line_coding_request {
+       __le32  rate;
+       __u8    format;
+#define GB_SERIAL_1_STOP_BITS                  0
+#define GB_SERIAL_1_5_STOP_BITS                        1
+#define GB_SERIAL_2_STOP_BITS                  2
+
+       __u8    parity;
+#define GB_SERIAL_NO_PARITY                    0
+#define GB_SERIAL_ODD_PARITY                   1
+#define GB_SERIAL_EVEN_PARITY                  2
+#define GB_SERIAL_MARK_PARITY                  3
+#define GB_SERIAL_SPACE_PARITY                 4
+
+       __u8    data_bits;
+
+       __u8    flow_control;
+#define GB_SERIAL_AUTO_RTSCTS_EN               0x1
+} __packed;
+
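The line-coding request mirrors the familiar CDC-ACM encoding: rate is a little-endian baud rate, and the remaining bytes select stop bits, parity, data bits and flow control from the constants above. A minimal sketch of filling it for 115200 8N1 with no flow control (the helper name is illustrative, not part of the protocol):

    /* Illustrative sketch: build a 115200 8N1 line-coding request. */
    static void example_fill_line_coding(struct gb_uart_set_line_coding_request *req)
    {
            req->rate = cpu_to_le32(115200);
            req->format = GB_SERIAL_1_STOP_BITS;
            req->parity = GB_SERIAL_NO_PARITY;
            req->data_bits = 8;
            req->flow_control = 0;          /* or GB_SERIAL_AUTO_RTSCTS_EN */
    }
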
+/* output control lines */
+#define GB_UART_CTRL_DTR                       0x01
+#define GB_UART_CTRL_RTS                       0x02
+
+struct gb_uart_set_control_line_state_request {
+       __u8    control;
+} __packed;
+
+struct gb_uart_set_break_request {
+       __u8    state;
+} __packed;
+
+/* input control lines and line errors */
+#define GB_UART_CTRL_DCD                       0x01
+#define GB_UART_CTRL_DSR                       0x02
+#define GB_UART_CTRL_RI                                0x04
+
+struct gb_uart_serial_state_request {
+       __u8    control;
+} __packed;
+
+struct gb_uart_serial_flush_request {
+       __u8    flags;
+#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER       0x01
+#define GB_SERIAL_FLAG_FLUSH_RECEIVER          0x02
+} __packed;
+
+/* Loopback */
+
+/* Greybus loopback request types */
+#define GB_LOOPBACK_TYPE_PING                  0x02
+#define GB_LOOPBACK_TYPE_TRANSFER              0x03
+#define GB_LOOPBACK_TYPE_SINK                  0x04
+
+/*
+ * Loopback request/response header format should be identical
+ * to simplify bandwidth and data movement analysis.
+ */
+struct gb_loopback_transfer_request {
+       __le32  len;
+       __le32  reserved0;
+       __le32  reserved1;
+       __u8    data[0];
+} __packed;
+
+struct gb_loopback_transfer_response {
+       __le32  len;
+       __le32  reserved0;
+       __le32  reserved1;
+       __u8    data[0];
+} __packed;
+
+/* SDIO */
+/* Greybus SDIO operation types */
+#define GB_SDIO_TYPE_GET_CAPABILITIES          0x02
+#define GB_SDIO_TYPE_SET_IOS                   0x03
+#define GB_SDIO_TYPE_COMMAND                   0x04
+#define GB_SDIO_TYPE_TRANSFER                  0x05
+#define GB_SDIO_TYPE_EVENT                     0x06
+
+/* get caps response: request has no payload */
+struct gb_sdio_get_caps_response {
+       __le32  caps;
+#define GB_SDIO_CAP_NONREMOVABLE       0x00000001
+#define GB_SDIO_CAP_4_BIT_DATA         0x00000002
+#define GB_SDIO_CAP_8_BIT_DATA         0x00000004
+#define GB_SDIO_CAP_MMC_HS             0x00000008
+#define GB_SDIO_CAP_SD_HS              0x00000010
+#define GB_SDIO_CAP_ERASE              0x00000020
+#define GB_SDIO_CAP_1_2V_DDR           0x00000040
+#define GB_SDIO_CAP_1_8V_DDR           0x00000080
+#define GB_SDIO_CAP_POWER_OFF_CARD     0x00000100
+#define GB_SDIO_CAP_UHS_SDR12          0x00000200
+#define GB_SDIO_CAP_UHS_SDR25          0x00000400
+#define GB_SDIO_CAP_UHS_SDR50          0x00000800
+#define GB_SDIO_CAP_UHS_SDR104         0x00001000
+#define GB_SDIO_CAP_UHS_DDR50          0x00002000
+#define GB_SDIO_CAP_DRIVER_TYPE_A      0x00004000
+#define GB_SDIO_CAP_DRIVER_TYPE_C      0x00008000
+#define GB_SDIO_CAP_DRIVER_TYPE_D      0x00010000
+#define GB_SDIO_CAP_HS200_1_2V         0x00020000
+#define GB_SDIO_CAP_HS200_1_8V         0x00040000
+#define GB_SDIO_CAP_HS400_1_2V         0x00080000
+#define GB_SDIO_CAP_HS400_1_8V         0x00100000
+
+       /* see possible values below at vdd */
+       __le32 ocr;
+       __le32 f_min;
+       __le32 f_max;
+       __le16 max_blk_count;
+       __le16 max_blk_size;
+} __packed;
+
+/* set ios request: response has no payload */
+struct gb_sdio_set_ios_request {
+       __le32  clock;
+       __le32  vdd;
+#define GB_SDIO_VDD_165_195    0x00000001
+#define GB_SDIO_VDD_20_21      0x00000002
+#define GB_SDIO_VDD_21_22      0x00000004
+#define GB_SDIO_VDD_22_23      0x00000008
+#define GB_SDIO_VDD_23_24      0x00000010
+#define GB_SDIO_VDD_24_25      0x00000020
+#define GB_SDIO_VDD_25_26      0x00000040
+#define GB_SDIO_VDD_26_27      0x00000080
+#define GB_SDIO_VDD_27_28      0x00000100
+#define GB_SDIO_VDD_28_29      0x00000200
+#define GB_SDIO_VDD_29_30      0x00000400
+#define GB_SDIO_VDD_30_31      0x00000800
+#define GB_SDIO_VDD_31_32      0x00001000
+#define GB_SDIO_VDD_32_33      0x00002000
+#define GB_SDIO_VDD_33_34      0x00004000
+#define GB_SDIO_VDD_34_35      0x00008000
+#define GB_SDIO_VDD_35_36      0x00010000
+
+       __u8    bus_mode;
+#define GB_SDIO_BUSMODE_OPENDRAIN      0x00
+#define GB_SDIO_BUSMODE_PUSHPULL       0x01
+
+       __u8    power_mode;
+#define GB_SDIO_POWER_OFF      0x00
+#define GB_SDIO_POWER_UP       0x01
+#define GB_SDIO_POWER_ON       0x02
+#define GB_SDIO_POWER_UNDEFINED        0x03
+
+       __u8    bus_width;
+#define GB_SDIO_BUS_WIDTH_1    0x00
+#define GB_SDIO_BUS_WIDTH_4    0x02
+#define GB_SDIO_BUS_WIDTH_8    0x03
+
+       __u8    timing;
+#define GB_SDIO_TIMING_LEGACY          0x00
+#define GB_SDIO_TIMING_MMC_HS          0x01
+#define GB_SDIO_TIMING_SD_HS           0x02
+#define GB_SDIO_TIMING_UHS_SDR12       0x03
+#define GB_SDIO_TIMING_UHS_SDR25       0x04
+#define GB_SDIO_TIMING_UHS_SDR50       0x05
+#define GB_SDIO_TIMING_UHS_SDR104      0x06
+#define GB_SDIO_TIMING_UHS_DDR50       0x07
+#define GB_SDIO_TIMING_MMC_DDR52       0x08
+#define GB_SDIO_TIMING_MMC_HS200       0x09
+#define GB_SDIO_TIMING_MMC_HS400       0x0A
+
+       __u8    signal_voltage;
+#define GB_SDIO_SIGNAL_VOLTAGE_330     0x00
+#define GB_SDIO_SIGNAL_VOLTAGE_180     0x01
+#define GB_SDIO_SIGNAL_VOLTAGE_120     0x02
+
+       __u8    drv_type;
+#define GB_SDIO_SET_DRIVER_TYPE_B      0x00
+#define GB_SDIO_SET_DRIVER_TYPE_A      0x01
+#define GB_SDIO_SET_DRIVER_TYPE_C      0x02
+#define GB_SDIO_SET_DRIVER_TYPE_D      0x03
+} __packed;
+
+/* command request */
+struct gb_sdio_command_request {
+       __u8    cmd;
+       __u8    cmd_flags;
+#define GB_SDIO_RSP_NONE               0x00
+#define GB_SDIO_RSP_PRESENT            0x01
+#define GB_SDIO_RSP_136                        0x02
+#define GB_SDIO_RSP_CRC                        0x04
+#define GB_SDIO_RSP_BUSY               0x08
+#define GB_SDIO_RSP_OPCODE             0x10
+
+       __u8    cmd_type;
+#define GB_SDIO_CMD_AC         0x00
+#define GB_SDIO_CMD_ADTC       0x01
+#define GB_SDIO_CMD_BC         0x02
+#define GB_SDIO_CMD_BCR                0x03
+
+       __le32  cmd_arg;
+       __le16  data_blocks;
+       __le16  data_blksz;
+} __packed;
+
+struct gb_sdio_command_response {
+       __le32  resp[4];
+} __packed;
+
+/* transfer request */
+struct gb_sdio_transfer_request {
+       __u8    data_flags;
+#define GB_SDIO_DATA_WRITE     0x01
+#define GB_SDIO_DATA_READ      0x02
+#define GB_SDIO_DATA_STREAM    0x04
+
+       __le16  data_blocks;
+       __le16  data_blksz;
+       __u8    data[0];
+} __packed;
+
+struct gb_sdio_transfer_response {
+       __le16  data_blocks;
+       __le16  data_blksz;
+       __u8    data[0];
+} __packed;
+
+/* event request: generated by module and is defined as unidirectional */
+struct gb_sdio_event_request {
+       __u8    event;
+#define GB_SDIO_CARD_INSERTED  0x01
+#define GB_SDIO_CARD_REMOVED   0x02
+#define GB_SDIO_WP             0x04
+} __packed;
+
+/* Camera */
+
+/* Greybus Camera request types */
+#define GB_CAMERA_TYPE_CAPABILITIES            0x02
+#define GB_CAMERA_TYPE_CONFIGURE_STREAMS       0x03
+#define GB_CAMERA_TYPE_CAPTURE                 0x04
+#define GB_CAMERA_TYPE_FLUSH                   0x05
+#define GB_CAMERA_TYPE_METADATA                        0x06
+
+#define GB_CAMERA_MAX_STREAMS                  4
+#define GB_CAMERA_MAX_SETTINGS_SIZE            8192
+
+/* Greybus Camera Configure Streams request payload */
+struct gb_camera_stream_config_request {
+       __le16 width;
+       __le16 height;
+       __le16 format;
+       __le16 padding;
+} __packed;
+
+struct gb_camera_configure_streams_request {
+       __u8 num_streams;
+       __u8 flags;
+#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY  0x01
+       __le16 padding;
+       struct gb_camera_stream_config_request config[0];
+} __packed;
+
+/* Greybus Camera Configure Streams response payload */
+struct gb_camera_stream_config_response {
+       __le16 width;
+       __le16 height;
+       __le16 format;
+       __u8 virtual_channel;
+       __u8 data_type[2];
+       __le16 max_pkt_size;
+       __u8 padding;
+       __le32 max_size;
+} __packed;
+
+struct gb_camera_configure_streams_response {
+       __u8 num_streams;
+#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED   0x01
+       __u8 flags;
+       __u8 padding[2];
+       __le32 data_rate;
+       struct gb_camera_stream_config_response config[0];
+};
+
+/* Greybus Camera Capture request payload - response has no payload */
+struct gb_camera_capture_request {
+       __le32 request_id;
+       __u8 streams;
+       __u8 padding;
+       __le16 num_frames;
+       __u8 settings[0];
+} __packed;
+
+/* Greybus Camera Flush response payload - request has no payload */
+struct gb_camera_flush_response {
+       __le32 request_id;
+} __packed;
+
+/* Greybus Camera Metadata request payload - operation has no response */
+struct gb_camera_metadata_request {
+       __le32 request_id;
+       __le16 frame_number;
+       __u8 stream;
+       __u8 padding;
+       __u8 metadata[0];
+} __packed;
+
+/* Lights */
+
+/* Greybus Lights request types */
+#define GB_LIGHTS_TYPE_GET_LIGHTS              0x02
+#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG                0x03
+#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG      0x04
+#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG        0x05
+#define GB_LIGHTS_TYPE_SET_BRIGHTNESS          0x06
+#define GB_LIGHTS_TYPE_SET_BLINK               0x07
+#define GB_LIGHTS_TYPE_SET_COLOR               0x08
+#define GB_LIGHTS_TYPE_SET_FADE                        0x09
+#define GB_LIGHTS_TYPE_EVENT                   0x0A
+#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY     0x0B
+#define GB_LIGHTS_TYPE_SET_FLASH_STROBE                0x0C
+#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT       0x0D
+#define GB_LIGHTS_TYPE_GET_FLASH_FAULT         0x0E
+
+/* Greybus Light modes */
+
+/*
+ * if you add any specific mode below, update also the
+ * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
+ */
+#define GB_CHANNEL_MODE_NONE           0x00000000
+#define GB_CHANNEL_MODE_BATTERY                0x00000001
+#define GB_CHANNEL_MODE_POWER          0x00000002
+#define GB_CHANNEL_MODE_WIRELESS       0x00000004
+#define GB_CHANNEL_MODE_BLUETOOTH      0x00000008
+#define GB_CHANNEL_MODE_KEYBOARD       0x00000010
+#define GB_CHANNEL_MODE_BUTTONS                0x00000020
+#define GB_CHANNEL_MODE_NOTIFICATION   0x00000040
+#define GB_CHANNEL_MODE_ATTENTION      0x00000080
+#define GB_CHANNEL_MODE_FLASH          0x00000100
+#define GB_CHANNEL_MODE_TORCH          0x00000200
+#define GB_CHANNEL_MODE_INDICATOR      0x00000400
+
+/* Lights Mode valid bit values */
+#define GB_CHANNEL_MODE_DEFINED_RANGE  0x000004FF
+#define GB_CHANNEL_MODE_VENDOR_RANGE   0x00F00000
+
+/* Greybus Light Channels Flags */
+#define GB_LIGHT_CHANNEL_MULTICOLOR    0x00000001
+#define GB_LIGHT_CHANNEL_FADER         0x00000002
+#define GB_LIGHT_CHANNEL_BLINK         0x00000004
+
+/* get count of lights in module */
+struct gb_lights_get_lights_response {
+       __u8    lights_count;
+} __packed;
+
+/* light config request payload */
+struct gb_lights_get_light_config_request {
+       __u8    id;
+} __packed;
+
+/* light config response payload */
+struct gb_lights_get_light_config_response {
+       __u8    channel_count;
+       __u8    name[32];
+} __packed;
+
+/* channel config request payload */
+struct gb_lights_get_channel_config_request {
+       __u8    light_id;
+       __u8    channel_id;
+} __packed;
+
+/* channel flash config request payload */
+struct gb_lights_get_channel_flash_config_request {
+       __u8    light_id;
+       __u8    channel_id;
+} __packed;
+
+/* channel config response payload */
+struct gb_lights_get_channel_config_response {
+       __u8    max_brightness;
+       __le32  flags;
+       __le32  color;
+       __u8    color_name[32];
+       __le32  mode;
+       __u8    mode_name[32];
+} __packed;
+
+/* channel flash config response payload */
+struct gb_lights_get_channel_flash_config_response {
+       __le32  intensity_min_uA;
+       __le32  intensity_max_uA;
+       __le32  intensity_step_uA;
+       __le32  timeout_min_us;
+       __le32  timeout_max_us;
+       __le32  timeout_step_us;
+} __packed;
+
+/* blink request payload: response has no payload */
+struct gb_lights_blink_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __le16  time_on_ms;
+       __le16  time_off_ms;
+} __packed;
+
+/* set brightness request payload: response has no payload */
+struct gb_lights_set_brightness_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __u8    brightness;
+} __packed;
+
+/* set color request payload: response has no payload */
+struct gb_lights_set_color_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __le32  color;
+} __packed;
+
+/* set fade request payload: response has no payload */
+struct gb_lights_set_fade_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __u8    fade_in;
+       __u8    fade_out;
+} __packed;
+
+/* event request: generated by module */
+struct gb_lights_event_request {
+       __u8    light_id;
+       __u8    event;
+#define GB_LIGHTS_LIGHT_CONFIG         0x01
+} __packed;
+
+/* set flash intensity request payload: response has no payload */
+struct gb_lights_set_flash_intensity_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __le32  intensity_uA;
+} __packed;
+
+/* set flash strobe state request payload: response has no payload */
+struct gb_lights_set_flash_strobe_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __u8    state;
+} __packed;
+
+/* set flash timeout request payload: response has no payload */
+struct gb_lights_set_flash_timeout_request {
+       __u8    light_id;
+       __u8    channel_id;
+       __le32  timeout_us;
+} __packed;
+
+/* get flash fault request payload */
+struct gb_lights_get_flash_fault_request {
+       __u8    light_id;
+       __u8    channel_id;
+} __packed;
+
+/* get flash fault response payload */
+struct gb_lights_get_flash_fault_response {
+       __le32  fault;
+#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE             0x00000000
+#define GB_LIGHTS_FLASH_FAULT_TIMEOUT                  0x00000001
+#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE         0x00000002
+#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT            0x00000004
+#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT             0x00000008
+#define GB_LIGHTS_FLASH_FAULT_INDICATOR                        0x00000010
+#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE            0x00000020
+#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE            0x00000040
+#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE     0x00000080
+} __packed;
+
+/* Audio */
+
+#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE                0x02
+#define GB_AUDIO_TYPE_GET_TOPOLOGY             0x03
+#define GB_AUDIO_TYPE_GET_CONTROL              0x04
+#define GB_AUDIO_TYPE_SET_CONTROL              0x05
+#define GB_AUDIO_TYPE_ENABLE_WIDGET            0x06
+#define GB_AUDIO_TYPE_DISABLE_WIDGET           0x07
+#define GB_AUDIO_TYPE_GET_PCM                  0x08
+#define GB_AUDIO_TYPE_SET_PCM                  0x09
+#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE         0x0a
+                                               /* 0x0b unused */
+#define GB_AUDIO_TYPE_ACTIVATE_TX              0x0c
+#define GB_AUDIO_TYPE_DEACTIVATE_TX            0x0d
+#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE         0x0e
+                                               /* 0x0f unused */
+#define GB_AUDIO_TYPE_ACTIVATE_RX              0x10
+#define GB_AUDIO_TYPE_DEACTIVATE_RX            0x11
+#define GB_AUDIO_TYPE_JACK_EVENT               0x12
+#define GB_AUDIO_TYPE_BUTTON_EVENT             0x13
+#define GB_AUDIO_TYPE_STREAMING_EVENT          0x14
+#define GB_AUDIO_TYPE_SEND_DATA                        0x15
+
+/* Module must be able to buffer 10ms of audio data, minimum */
+#define GB_AUDIO_SAMPLE_BUFFER_MIN_US          10000
+
+#define GB_AUDIO_PCM_NAME_MAX                  32
+#define AUDIO_DAI_NAME_MAX                     32
+#define AUDIO_CONTROL_NAME_MAX                 32
+#define AUDIO_CTL_ELEM_NAME_MAX                        44
+#define AUDIO_ENUM_NAME_MAX                    64
+#define AUDIO_WIDGET_NAME_MAX                  32
+
+/* See SNDRV_PCM_FMTBIT_* in Linux source */
+#define GB_AUDIO_PCM_FMT_S8                    BIT(0)
+#define GB_AUDIO_PCM_FMT_U8                    BIT(1)
+#define GB_AUDIO_PCM_FMT_S16_LE                        BIT(2)
+#define GB_AUDIO_PCM_FMT_S16_BE                        BIT(3)
+#define GB_AUDIO_PCM_FMT_U16_LE                        BIT(4)
+#define GB_AUDIO_PCM_FMT_U16_BE                        BIT(5)
+#define GB_AUDIO_PCM_FMT_S24_LE                        BIT(6)
+#define GB_AUDIO_PCM_FMT_S24_BE                        BIT(7)
+#define GB_AUDIO_PCM_FMT_U24_LE                        BIT(8)
+#define GB_AUDIO_PCM_FMT_U24_BE                        BIT(9)
+#define GB_AUDIO_PCM_FMT_S32_LE                        BIT(10)
+#define GB_AUDIO_PCM_FMT_S32_BE                        BIT(11)
+#define GB_AUDIO_PCM_FMT_U32_LE                        BIT(12)
+#define GB_AUDIO_PCM_FMT_U32_BE                        BIT(13)
+
+/* See SNDRV_PCM_RATE_* in Linux source */
+#define GB_AUDIO_PCM_RATE_5512                 BIT(0)
+#define GB_AUDIO_PCM_RATE_8000                 BIT(1)
+#define GB_AUDIO_PCM_RATE_11025                        BIT(2)
+#define GB_AUDIO_PCM_RATE_16000                        BIT(3)
+#define GB_AUDIO_PCM_RATE_22050                        BIT(4)
+#define GB_AUDIO_PCM_RATE_32000                        BIT(5)
+#define GB_AUDIO_PCM_RATE_44100                        BIT(6)
+#define GB_AUDIO_PCM_RATE_48000                        BIT(7)
+#define GB_AUDIO_PCM_RATE_64000                        BIT(8)
+#define GB_AUDIO_PCM_RATE_88200                        BIT(9)
+#define GB_AUDIO_PCM_RATE_96000                        BIT(10)
+#define GB_AUDIO_PCM_RATE_176400               BIT(11)
+#define GB_AUDIO_PCM_RATE_192000               BIT(12)
+
+#define GB_AUDIO_STREAM_TYPE_CAPTURE           0x1
+#define GB_AUDIO_STREAM_TYPE_PLAYBACK          0x2
+
+#define GB_AUDIO_CTL_ELEM_ACCESS_READ          BIT(0)
+#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE         BIT(1)
+
+/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN         0x01
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER         0x02
+#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED      0x03
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64       0x06
+
+/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_IFACE_CARD           0x00
+#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP          0x01
+#define GB_AUDIO_CTL_ELEM_IFACE_MIXER          0x02
+#define GB_AUDIO_CTL_ELEM_IFACE_PCM            0x03
+#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI                0x04
+#define GB_AUDIO_CTL_ELEM_IFACE_TIMER          0x05
+#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER      0x06
+
+/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
+#define GB_AUDIO_ACCESS_READ                   BIT(0)
+#define GB_AUDIO_ACCESS_WRITE                  BIT(1)
+#define GB_AUDIO_ACCESS_VOLATILE               BIT(2)
+#define GB_AUDIO_ACCESS_TIMESTAMP              BIT(3)
+#define GB_AUDIO_ACCESS_TLV_READ               BIT(4)
+#define GB_AUDIO_ACCESS_TLV_WRITE              BIT(5)
+#define GB_AUDIO_ACCESS_TLV_COMMAND            BIT(6)
+#define GB_AUDIO_ACCESS_INACTIVE               BIT(7)
+#define GB_AUDIO_ACCESS_LOCK                   BIT(8)
+#define GB_AUDIO_ACCESS_OWNER                  BIT(9)
+
+/* enum snd_soc_dapm_type */
+#define GB_AUDIO_WIDGET_TYPE_INPUT             0x0
+#define GB_AUDIO_WIDGET_TYPE_OUTPUT            0x1
+#define GB_AUDIO_WIDGET_TYPE_MUX               0x2
+#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX          0x3
+#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX         0x4
+#define GB_AUDIO_WIDGET_TYPE_MIXER             0x5
+#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL   0x6
+#define GB_AUDIO_WIDGET_TYPE_PGA               0x7
+#define GB_AUDIO_WIDGET_TYPE_OUT_DRV           0x8
+#define GB_AUDIO_WIDGET_TYPE_ADC               0x9
+#define GB_AUDIO_WIDGET_TYPE_DAC               0xa
+#define GB_AUDIO_WIDGET_TYPE_MICBIAS           0xb
+#define GB_AUDIO_WIDGET_TYPE_MIC               0xc
+#define GB_AUDIO_WIDGET_TYPE_HP                        0xd
+#define GB_AUDIO_WIDGET_TYPE_SPK               0xe
+#define GB_AUDIO_WIDGET_TYPE_LINE              0xf
+#define GB_AUDIO_WIDGET_TYPE_SWITCH            0x10
+#define GB_AUDIO_WIDGET_TYPE_VMID              0x11
+#define GB_AUDIO_WIDGET_TYPE_PRE               0x12
+#define GB_AUDIO_WIDGET_TYPE_POST              0x13
+#define GB_AUDIO_WIDGET_TYPE_SUPPLY            0x14
+#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY  0x15
+#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY      0x16
+#define GB_AUDIO_WIDGET_TYPE_AIF_IN            0x17
+#define GB_AUDIO_WIDGET_TYPE_AIF_OUT           0x18
+#define GB_AUDIO_WIDGET_TYPE_SIGGEN            0x19
+#define GB_AUDIO_WIDGET_TYPE_DAI_IN            0x1a
+#define GB_AUDIO_WIDGET_TYPE_DAI_OUT           0x1b
+#define GB_AUDIO_WIDGET_TYPE_DAI_LINK          0x1c
+
+#define GB_AUDIO_WIDGET_STATE_DISABLED         0x01
+#define GB_AUDIO_WIDGET_STATE_ENAABLED         0x02
+
+#define GB_AUDIO_JACK_EVENT_INSERTION          0x1
+#define GB_AUDIO_JACK_EVENT_REMOVAL            0x2
+
+#define GB_AUDIO_BUTTON_EVENT_PRESS            0x1
+#define GB_AUDIO_BUTTON_EVENT_RELEASE          0x2
+
+#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED   0x1
+#define GB_AUDIO_STREAMING_EVENT_HALT          0x2
+#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR        0x3
+#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR        0x4
+#define GB_AUDIO_STREAMING_EVENT_FAILURE       0x5
+#define GB_AUDIO_STREAMING_EVENT_UNDERRUN      0x6
+#define GB_AUDIO_STREAMING_EVENT_OVERRUN       0x7
+#define GB_AUDIO_STREAMING_EVENT_CLOCKING      0x8
+#define GB_AUDIO_STREAMING_EVENT_DATA_LEN      0x9
+
+#define GB_AUDIO_INVALID_INDEX                 0xff
+
+/* enum snd_jack_types */
+#define GB_AUDIO_JACK_HEADPHONE                        0x0000001
+#define GB_AUDIO_JACK_MICROPHONE               0x0000002
+#define GB_AUDIO_JACK_HEADSET                  (GB_AUDIO_JACK_HEADPHONE | \
+                                                GB_AUDIO_JACK_MICROPHONE)
+#define GB_AUDIO_JACK_LINEOUT                  0x0000004
+#define GB_AUDIO_JACK_MECHANICAL               0x0000008
+#define GB_AUDIO_JACK_VIDEOOUT                 0x0000010
+#define GB_AUDIO_JACK_AVOUT                    (GB_AUDIO_JACK_LINEOUT | \
+                                                GB_AUDIO_JACK_VIDEOOUT)
+#define GB_AUDIO_JACK_LINEIN                   0x0000020
+#define GB_AUDIO_JACK_OC_HPHL                  0x0000040
+#define GB_AUDIO_JACK_OC_HPHR                  0x0000080
+#define GB_AUDIO_JACK_MICROPHONE2              0x0000200
+#define GB_AUDIO_JACK_ANC_HEADPHONE            (GB_AUDIO_JACK_HEADPHONE | \
+                                                GB_AUDIO_JACK_MICROPHONE | \
+                                                GB_AUDIO_JACK_MICROPHONE2)
+/* Kept separate from switches to facilitate implementation */
+#define GB_AUDIO_JACK_BTN_0                    0x4000000
+#define GB_AUDIO_JACK_BTN_1                    0x2000000
+#define GB_AUDIO_JACK_BTN_2                    0x1000000
+#define GB_AUDIO_JACK_BTN_3                    0x0800000
+
+struct gb_audio_pcm {
+       __u8    stream_name[GB_AUDIO_PCM_NAME_MAX];
+       __le32  formats;        /* GB_AUDIO_PCM_FMT_* */
+       __le32  rates;          /* GB_AUDIO_PCM_RATE_* */
+       __u8    chan_min;
+       __u8    chan_max;
+       __u8    sig_bits;       /* number of bits of content */
+} __packed;
+
+struct gb_audio_dai {
+       __u8                    name[AUDIO_DAI_NAME_MAX];
+       __le16                  data_cport;
+       struct gb_audio_pcm     capture;
+       struct gb_audio_pcm     playback;
+} __packed;
+
+struct gb_audio_integer {
+       __le32  min;
+       __le32  max;
+       __le32  step;
+} __packed;
+
+struct gb_audio_integer64 {
+       __le64  min;
+       __le64  max;
+       __le64  step;
+} __packed;
+
+struct gb_audio_enumerated {
+       __le32  items;
+       __le16  names_length;
+       __u8    names[0];
+} __packed;
+
+struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
+       __u8            type;           /* GB_AUDIO_CTL_ELEM_TYPE_* */
+       __le16          dimen[4];
+       union {
+               struct gb_audio_integer         integer;
+               struct gb_audio_integer64       integer64;
+               struct gb_audio_enumerated      enumerated;
+       } value;
+} __packed;
+
+struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
+       __le64                          timestamp; /* XXX needed? */
+       union {
+               __le32  integer_value[2];       /* consider CTL_DOUBLE_xxx */
+               __le64  integer64_value[2];
+               __le32  enumerated_item[2];
+       } value;
+} __packed;
+
+struct gb_audio_control {
+       __u8    name[AUDIO_CONTROL_NAME_MAX];
+       __u8    id;             /* 0-63 */
+       __u8    iface;          /* GB_AUDIO_IFACE_* */
+       __le16  data_cport;
+       __le32  access;         /* GB_AUDIO_ACCESS_* */
+       __u8    count;          /* count of same elements */
+       __u8    count_values;   /* count of values, max=2 for CTL_DOUBLE_xxx */
+       struct gb_audio_ctl_elem_info   info;
+} __packed;
+
+struct gb_audio_widget {
+       __u8    name[AUDIO_WIDGET_NAME_MAX];
+       __u8    sname[AUDIO_WIDGET_NAME_MAX];
+       __u8    id;
+       __u8    type;           /* GB_AUDIO_WIDGET_TYPE_* */
+       __u8    state;          /* GB_AUDIO_WIDGET_STATE_* */
+       __u8    ncontrols;
+       struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
+} __packed;
+
+struct gb_audio_route {
+       __u8    source_id;      /* widget id */
+       __u8    destination_id; /* widget id */
+       __u8    control_id;     /* 0-63 */
+       __u8    index;          /* Selection within the control */
+} __packed;
+
+struct gb_audio_topology {
+       __u8    num_dais;
+       __u8    num_controls;
+       __u8    num_widgets;
+       __u8    num_routes;
+       __le32  size_dais;
+       __le32  size_controls;
+       __le32  size_widgets;
+       __le32  size_routes;
+       __le32  jack_type;
+       /*
+        * struct gb_audio_dai          dai[num_dais];
+        * struct gb_audio_control      controls[num_controls];
+        * struct gb_audio_widget       widgets[num_widgets];
+        * struct gb_audio_route        routes[num_routes];
+        */
+       __u8    data[0];
+} __packed;
+
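The topology response packs four variable-length arrays back to back in data[]; the size_dais/size_controls/size_widgets/size_routes byte counts let a consumer find the start of each region without walking the previous one element by element (the per-element structures can themselves be variable-sized). A rough sketch of that offset arithmetic, with illustrative names only:

    /* Illustrative sketch: locate the four regions inside topology->data[]. */
    static void example_split_topology(struct gb_audio_topology *top)
    {
            u8 *p = top->data;
            struct gb_audio_dai *dais = (struct gb_audio_dai *)p;
            struct gb_audio_control *controls;
            struct gb_audio_widget *widgets;
            struct gb_audio_route *routes;

            p += le32_to_cpu(top->size_dais);
            controls = (struct gb_audio_control *)p;
            p += le32_to_cpu(top->size_controls);
            widgets = (struct gb_audio_widget *)p;
            p += le32_to_cpu(top->size_widgets);
            routes = (struct gb_audio_route *)p;
            /* dais/controls/widgets/routes now point at their regions */
    }
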
+struct gb_audio_get_topology_size_response {
+       __le16  size;
+} __packed;
+
+struct gb_audio_get_topology_response {
+       struct gb_audio_topology        topology;
+} __packed;
+
+struct gb_audio_get_control_request {
+       __u8    control_id;
+       __u8    index;
+} __packed;
+
+struct gb_audio_get_control_response {
+       struct gb_audio_ctl_elem_value  value;
+} __packed;
+
+struct gb_audio_set_control_request {
+       __u8    control_id;
+       __u8    index;
+       struct gb_audio_ctl_elem_value  value;
+} __packed;
+
+struct gb_audio_enable_widget_request {
+       __u8    widget_id;
+} __packed;
+
+struct gb_audio_disable_widget_request {
+       __u8    widget_id;
+} __packed;
+
+struct gb_audio_get_pcm_request {
+       __le16  data_cport;
+} __packed;
+
+struct gb_audio_get_pcm_response {
+       __le32  format;
+       __le32  rate;
+       __u8    channels;
+       __u8    sig_bits;
+} __packed;
+
+struct gb_audio_set_pcm_request {
+       __le16  data_cport;
+       __le32  format;
+       __le32  rate;
+       __u8    channels;
+       __u8    sig_bits;
+} __packed;
+
+struct gb_audio_set_tx_data_size_request {
+       __le16  data_cport;
+       __le16  size;
+} __packed;
+
+struct gb_audio_activate_tx_request {
+       __le16  data_cport;
+} __packed;
+
+struct gb_audio_deactivate_tx_request {
+       __le16  data_cport;
+} __packed;
+
+struct gb_audio_set_rx_data_size_request {
+       __le16  data_cport;
+       __le16  size;
+} __packed;
+
+struct gb_audio_activate_rx_request {
+       __le16  data_cport;
+} __packed;
+
+struct gb_audio_deactivate_rx_request {
+       __le16  data_cport;
+} __packed;
+
+struct gb_audio_jack_event_request {
+       __u8    widget_id;
+       __u8    jack_attribute;
+       __u8    event;
+} __packed;
+
+struct gb_audio_button_event_request {
+       __u8    widget_id;
+       __u8    button_id;
+       __u8    event;
+} __packed;
+
+struct gb_audio_streaming_event_request {
+       __le16  data_cport;
+       __u8    event;
+} __packed;
+
+struct gb_audio_send_data_request {
+       __le64  timestamp;
+       __u8    data[0];
+} __packed;
+
+
+/* Log */
+
+/* operations */
+#define GB_LOG_TYPE_SEND_LOG   0x02
+
+/* length */
+#define GB_LOG_MAX_LEN         1024
+
+struct gb_log_send_log_request {
+       __le16  len;
+       __u8    msg[0];
+} __packed;
+
+#endif /* __GREYBUS_PROTOCOLS_H */
+
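All of the request and response structures above travel as Greybus operation payloads on a connection. As a hedged sketch of the typical calling pattern, assuming the gb_operation_sync() helper from the Greybus core (the surrounding function and its name are illustrative):

    /* Illustrative sketch: set channel brightness over a lights connection. */
    static int example_set_brightness(struct gb_connection *connection,
                                      u8 light_id, u8 channel_id, u8 brightness)
    {
            struct gb_lights_set_brightness_request req;

            req.light_id = light_id;
            req.channel_id = channel_id;
            req.brightness = brightness;

            /* The set-brightness response has no payload. */
            return gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
                                     &req, sizeof(req), NULL, 0);
    }
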
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
new file mode 100644 (file)
index 0000000..6f8692d
--- /dev/null
@@ -0,0 +1,531 @@
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM greybus
+
+#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GREYBUS_H
+
+#include <linux/tracepoint.h>
+
+struct gb_message;
+struct gb_operation;
+struct gb_connection;
+struct gb_bundle;
+struct gb_host_device;
+
+DECLARE_EVENT_CLASS(gb_message,
+
+       TP_PROTO(struct gb_message *message),
+
+       TP_ARGS(message),
+
+       TP_STRUCT__entry(
+               __field(u16, size)
+               __field(u16, operation_id)
+               __field(u8, type)
+               __field(u8, result)
+       ),
+
+       TP_fast_assign(
+               __entry->size = le16_to_cpu(message->header->size);
+               __entry->operation_id =
+                       le16_to_cpu(message->header->operation_id);
+               __entry->type = message->header->type;
+               __entry->result = message->header->result;
+       ),
+
+       TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
+                 __entry->size, __entry->operation_id,
+                 __entry->type, __entry->result)
+);
+
+#define DEFINE_MESSAGE_EVENT(name)                                     \
+               DEFINE_EVENT(gb_message, name,                          \
+                               TP_PROTO(struct gb_message *message),   \
+                               TP_ARGS(message))
+
+/*
+ * Occurs immediately before calling a host device's message_send()
+ * method.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_send);
+
+/*
+ * Occurs after an incoming request message has been received
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_request);
+
+/*
+ * Occurs after an incoming response message has been received,
+ * after its matching request has been found.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_response);
+
+/*
+ * Occurs after an operation has been canceled, possibly before the
+ * cancellation is complete.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);
+
+/*
+ * Occurs when an incoming request is cancelled; if the response has
+ * been queued for sending, this occurs after it is sent.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);
+
+/*
+ * Occurs in the host driver message_send() function just prior to
+ * handing off the data to be processed by hardware.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_submit);
+
+#undef DEFINE_MESSAGE_EVENT
+
+DECLARE_EVENT_CLASS(gb_operation,
+
+       TP_PROTO(struct gb_operation *operation),
+
+       TP_ARGS(operation),
+
+       TP_STRUCT__entry(
+               __field(u16, cport_id)  /* CPort of HD side of connection */
+               __field(u16, id)        /* Operation ID */
+               __field(u8, type)
+               __field(unsigned long, flags)
+               __field(int, active)
+               __field(int, waiters)
+               __field(int, errno)
+       ),
+
+       TP_fast_assign(
+               __entry->cport_id = operation->connection->hd_cport_id;
+               __entry->id = operation->id;
+               __entry->type = operation->type;
+               __entry->flags = operation->flags;
+               __entry->active = operation->active;
+               __entry->waiters = atomic_read(&operation->waiters);
+               __entry->errno = operation->errno;
+       ),
+
+       TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
+                 __entry->id, __entry->type, __entry->cport_id, __entry->flags,
+                 __entry->active, __entry->waiters, __entry->errno)
+);
+
+#define DEFINE_OPERATION_EVENT(name)                                   \
+               DEFINE_EVENT(gb_operation, name,                        \
+                               TP_PROTO(struct gb_operation *operation), \
+                               TP_ARGS(operation))
+
+/*
+ * Occurs after a new operation for an outgoing request has been
+ * successfully created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create);
+
+/*
+ * Occurs after a new core operation has been created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_core);
+
+/*
+ * Occurs after a new operation for an incoming request has been
+ * successfully created and initialized.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
+
+/*
+ * Occurs when the last reference to an operation has been dropped,
+ * prior to freeing resources.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_destroy);
+
+/*
+ * Occurs when an operation has been marked active, after updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_get_active);
+
+/*
+ * Occurs when an operation is being marked inactive, before updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_put_active);
+
+#undef DEFINE_OPERATION_EVENT
+
+DECLARE_EVENT_CLASS(gb_connection,
+
+       TP_PROTO(struct gb_connection *connection),
+
+       TP_ARGS(connection),
+
+       TP_STRUCT__entry(
+               __field(int, hd_bus_id)
+               __field(u8, bundle_id)
+               /* name contains "hd_cport_id/intf_id:cport_id" */
+               __dynamic_array(char, name, sizeof(connection->name))
+               __field(enum gb_connection_state, state)
+               __field(unsigned long, flags)
+       ),
+
+       TP_fast_assign(
+               __entry->hd_bus_id = connection->hd->bus_id;
+               __entry->bundle_id = connection->bundle ?
+                               connection->bundle->id : BUNDLE_ID_NONE;
+               memcpy(__get_str(name), connection->name,
+                                       sizeof(connection->name));
+               __entry->state = connection->state;
+               __entry->flags = connection->flags;
+       ),
+
+       TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
+                 __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
+                 (unsigned int)__entry->state, __entry->flags)
+);
+
+#define DEFINE_CONNECTION_EVENT(name)                                  \
+               DEFINE_EVENT(gb_connection, name,                       \
+                               TP_PROTO(struct gb_connection *connection), \
+                               TP_ARGS(connection))
+
+/*
+ * Occurs after a new connection is successfully created.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_create);
+
+/*
+ * Occurs when the last reference to a connection has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_release);
+
+/*
+ * Occurs when a new reference to a connection is added, currently
+ * only when a message over the connection is received.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_get);
+
+/*
+ * Occurs when a reference to a connection is dropped, after a
+ * received message has been handled, or when the connection is
+ * destroyed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_put);
+
+/*
+ * Occurs when a request to enable a connection is made, either for
+ * transmit only, or for both transmit and receive.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_enable);
+
+/*
+ * Occurs when a request to disable a connection is made, either for
+ * receive only, or for both transmit and receive.  Also occurs when
+ * a request to forcefully disable a connection is made.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_disable);
+
+#undef DEFINE_CONNECTION_EVENT
+
+DECLARE_EVENT_CLASS(gb_bundle,
+
+       TP_PROTO(struct gb_bundle *bundle),
+
+       TP_ARGS(bundle),
+
+       TP_STRUCT__entry(
+               __field(u8, intf_id)
+               __field(u8, id)
+               __field(u8, class)
+               __field(size_t, num_cports)
+       ),
+
+       TP_fast_assign(
+               __entry->intf_id = bundle->intf->interface_id;
+               __entry->id = bundle->id;
+               __entry->class = bundle->class;
+               __entry->num_cports = bundle->num_cports;
+       ),
+
+       TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
+                 __entry->intf_id, __entry->id, __entry->class,
+                 __entry->num_cports)
+);
+
+#define DEFINE_BUNDLE_EVENT(name)                                      \
+               DEFINE_EVENT(gb_bundle, name,                   \
+                               TP_PROTO(struct gb_bundle *bundle), \
+                               TP_ARGS(bundle))
+
+/*
+ * Occurs after a new bundle is successfully created.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_create);
+
+/*
+ * Occurs when the last reference to a bundle has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_release);
+
+/*
+ * Occurs when a bundle is added to an interface when the interface
+ * is enabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_add);
+
+/*
+ * Occurs when a registered bundle gets destroyed, normally at the
+ * time an interface is disabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_destroy);
+
+#undef DEFINE_BUNDLE_EVENT
+
+DECLARE_EVENT_CLASS(gb_interface,
+
+       TP_PROTO(struct gb_interface *intf),
+
+       TP_ARGS(intf),
+
+       TP_STRUCT__entry(
+               __field(u8, module_id)
+               __field(u8, id)         /* Interface id */
+               __field(u8, device_id)
+               __field(int, disconnected)      /* bool */
+               __field(int, ejected)           /* bool */
+               __field(int, active)            /* bool */
+               __field(int, enabled)           /* bool */
+               __field(int, mode_switch)       /* bool */
+       ),
+
+       TP_fast_assign(
+               __entry->module_id = intf->module->module_id;
+               __entry->id = intf->interface_id;
+               __entry->device_id = intf->device_id;
+               __entry->disconnected = intf->disconnected;
+               __entry->ejected = intf->ejected;
+               __entry->active = intf->active;
+               __entry->enabled = intf->enabled;
+               __entry->mode_switch = intf->mode_switch;
+       ),
+
+       TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
+               __entry->id, __entry->device_id, __entry->module_id,
+               __entry->disconnected, __entry->ejected, __entry->active,
+               __entry->enabled, __entry->mode_switch)
+);
+
+#define DEFINE_INTERFACE_EVENT(name)                                   \
+               DEFINE_EVENT(gb_interface, name,                        \
+                               TP_PROTO(struct gb_interface *intf),    \
+                               TP_ARGS(intf))
+
+/*
+ * Occurs after a new interface is successfully created.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_create);
+
+/*
+ * Occurs after the last reference to an interface has been dropped.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_release);
+
+/*
+ * Occurs after an interface has been registered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_add);
+
+/*
+ * Occurs when a registered interface gets deregistered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_del);
+
+/*
+ * Occurs when a registered interface has been successfully
+ * activated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_activate);
+
+/*
+ * Occurs when an activated interface is being deactivated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_deactivate);
+
+/*
+ * Occurs when an interface has been successfully enabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_enable);
+
+/*
+ * Occurs when an enabled interface is being disabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_disable);
+
+#undef DEFINE_INTERFACE_EVENT
+
+DECLARE_EVENT_CLASS(gb_module,
+
+       TP_PROTO(struct gb_module *module),
+
+       TP_ARGS(module),
+
+       TP_STRUCT__entry(
+               __field(int, hd_bus_id)
+               __field(u8, module_id)
+               __field(size_t, num_interfaces)
+               __field(int, disconnected)      /* bool */
+       ),
+
+       TP_fast_assign(
+               __entry->hd_bus_id = module->hd->bus_id;
+               __entry->module_id = module->module_id;
+               __entry->num_interfaces = module->num_interfaces;
+               __entry->disconnected = module->disconnected;
+       ),
+
+       TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
+               __entry->hd_bus_id, __entry->module_id,
+               __entry->num_interfaces, __entry->disconnected)
+);
+
+#define DEFINE_MODULE_EVENT(name)                                      \
+               DEFINE_EVENT(gb_module, name,                           \
+                               TP_PROTO(struct gb_module *module),     \
+                               TP_ARGS(module))
+
+/*
+ * Occurs after a new module is successfully created, before
+ * creating any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_create);
+
+/*
+ * Occurs after the last reference to a module has been dropped.
+ */
+DEFINE_MODULE_EVENT(gb_module_release);
+
+/*
+ * Occurs after a module has been added, before registering
+ * any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_add);
+
+/*
+ * Occurs when a module is deleted, before deregistering its
+ * interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_del);
+
+#undef DEFINE_MODULE_EVENT
+
+DECLARE_EVENT_CLASS(gb_host_device,
+
+       TP_PROTO(struct gb_host_device *hd),
+
+       TP_ARGS(hd),
+
+       TP_STRUCT__entry(
+               __field(int, bus_id)
+               __field(size_t, num_cports)
+               __field(size_t, buffer_size_max)
+       ),
+
+       TP_fast_assign(
+               __entry->bus_id = hd->bus_id;
+               __entry->num_cports = hd->num_cports;
+               __entry->buffer_size_max = hd->buffer_size_max;
+       ),
+
+       TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
+               __entry->bus_id, __entry->num_cports,
+               __entry->buffer_size_max)
+);
+
+#define DEFINE_HD_EVENT(name)                                          \
+               DEFINE_EVENT(gb_host_device, name,                      \
+                               TP_PROTO(struct gb_host_device *hd),    \
+                               TP_ARGS(hd))
+
+/*
+ * Occurs after a new host device is successfully created, before
+ * its SVC has been set up.
+ */
+DEFINE_HD_EVENT(gb_hd_create);
+
+/*
+ * Occurs after the last reference to a host device has been
+ * dropped.
+ */
+DEFINE_HD_EVENT(gb_hd_release);
+
+/*
+ * Occurs after a new host device has been added, after the
+ * connection to its SVC has been enabled.
+ */
+DEFINE_HD_EVENT(gb_hd_add);
+
+/*
+ * Occurs when a host device is being disconnected from the AP USB
+ * host controller.
+ */
+DEFINE_HD_EVENT(gb_hd_del);
+
+/*
+ * Occurs when a host device has passed received data to the Greybus
+ * core, after it has been determined it is destined for a valid
+ * CPort.
+ */
+DEFINE_HD_EVENT(gb_hd_in);
+
+#undef DEFINE_HD_EVENT
+
+/*
+ * Occurs on a TimeSync synchronization event or a TimeSync ping event.
+ */
+TRACE_EVENT(gb_timesync_irq,
+
+       TP_PROTO(u8 ping, u8 strobe, u8 count, u64 frame_time),
+
+       TP_ARGS(ping, strobe, count, frame_time),
+
+       TP_STRUCT__entry(
+               __field(u8, ping)
+               __field(u8, strobe)
+               __field(u8, count)
+               __field(u64, frame_time)
+       ),
+
+       TP_fast_assign(
+               __entry->ping = ping;
+               __entry->strobe = strobe;
+               __entry->count = count;
+               __entry->frame_time = frame_time;
+       ),
+
+       TP_printk("%s %d/%d frame-time %llu\n",
+                 __entry->ping ? "ping" : "strobe", __entry->strobe,
+                 __entry->count, __entry->frame_time)
+);
+
+#endif /* _TRACE_GREYBUS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE greybus_trace
+#include <trace/define_trace.h>
+
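This header follows the standard Linux tracepoint pattern: exactly one translation unit in the driver defines CREATE_TRACE_POINTS before including it, which emits the event bodies once; every other file simply includes the header and calls the generated trace_*() helpers. A minimal sketch (the function name is illustrative):

    /* In exactly one .c file of the driver, instantiate the tracepoints: */
    #define CREATE_TRACE_POINTS
    #include "greybus_trace.h"

    /* Any other file just includes the header and fires events, e.g.: */
    static void example_trace_send(struct gb_message *message)
    {
            trace_gb_message_send(message);
    }
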
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
new file mode 100644 (file)
index 0000000..185ae3f
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
+
+static struct ida gb_hd_bus_id_map;
+
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+                bool async)
+{
+       if (!hd || !hd->driver || !hd->driver->output)
+               return -EINVAL;
+       return hd->driver->output(hd, req, size, cmd, async);
+}
+EXPORT_SYMBOL_GPL(gb_hd_output);
+
+static ssize_t bus_id_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_host_device *hd = to_gb_host_device(dev);
+
+       return sprintf(buf, "%d\n", hd->bus_id);
+}
+static DEVICE_ATTR_RO(bus_id);
+
+static struct attribute *bus_attrs[] = {
+       &dev_attr_bus_id.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(bus);
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
+{
+       struct ida *id_map = &hd->cport_id_map;
+       int ret;
+
+       ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
+
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
+{
+       struct ida *id_map = &hd->cport_id_map;
+
+       ida_simple_remove(id_map, cport_id);
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
+
+/* Locking: Caller guarantees serialisation */
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+                               unsigned long flags)
+{
+       struct ida *id_map = &hd->cport_id_map;
+       int ida_start, ida_end;
+
+       if (hd->driver->cport_allocate)
+               return hd->driver->cport_allocate(hd, cport_id, flags);
+
+       if (cport_id < 0) {
+               ida_start = 0;
+               ida_end = hd->num_cports;
+       } else if (cport_id < hd->num_cports) {
+               ida_start = cport_id;
+               ida_end = cport_id + 1;
+       } else {
+               dev_err(&hd->dev, "cport %d not available\n", cport_id);
+               return -EINVAL;
+       }
+
+       return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+}
+
+/* Locking: Caller guarantees serialisation */
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
+{
+       if (hd->driver->cport_release) {
+               hd->driver->cport_release(hd, cport_id);
+               return;
+       }
+
+       ida_simple_remove(&hd->cport_id_map, cport_id);
+}
+
+static void gb_hd_release(struct device *dev)
+{
+       struct gb_host_device *hd = to_gb_host_device(dev);
+
+       trace_gb_hd_release(hd);
+
+       if (hd->svc)
+               gb_svc_put(hd->svc);
+       ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
+       ida_destroy(&hd->cport_id_map);
+       kfree(hd);
+}
+
+struct device_type greybus_hd_type = {
+       .name           = "greybus_host_device",
+       .release        = gb_hd_release,
+};
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+                                       struct device *parent,
+                                       size_t buffer_size_max,
+                                       size_t num_cports)
+{
+       struct gb_host_device *hd;
+       int ret;
+
+       /*
+        * Validate that the driver implements all of the mandatory callbacks
+        * so that we don't have to check for them every time we invoke them.
+        */
+       if ((!driver->message_send) || (!driver->message_cancel)) {
+               dev_err(parent, "mandatory hd-callbacks missing\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
+               dev_err(parent, "greybus host-device buffers too small\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
+               dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * Make sure to never allocate messages larger than what the Greybus
+        * protocol supports.
+        */
+       if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
+               dev_warn(parent, "limiting buffer size to %u\n",
+                        GB_OPERATION_MESSAGE_SIZE_MAX);
+               buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
+       }
+
+       hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
+       if (!hd)
+               return ERR_PTR(-ENOMEM);
+
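+       /* Bus ids start at 1; an ida "end" of 0 means no upper limit. */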
+       ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
+       if (ret < 0) {
+               kfree(hd);
+               return ERR_PTR(ret);
+       }
+       hd->bus_id = ret;
+
+       hd->driver = driver;
+       INIT_LIST_HEAD(&hd->modules);
+       INIT_LIST_HEAD(&hd->connections);
+       ida_init(&hd->cport_id_map);
+       hd->buffer_size_max = buffer_size_max;
+       hd->num_cports = num_cports;
+
+       hd->dev.parent = parent;
+       hd->dev.bus = &greybus_bus_type;
+       hd->dev.type = &greybus_hd_type;
+       hd->dev.groups = bus_groups;
+       hd->dev.dma_mask = hd->dev.parent->dma_mask;
+       device_initialize(&hd->dev);
+       dev_set_name(&hd->dev, "greybus%d", hd->bus_id);
+
+       trace_gb_hd_create(hd);
+
+       hd->svc = gb_svc_create(hd);
+       if (!hd->svc) {
+               dev_err(&hd->dev, "failed to create svc\n");
+               put_device(&hd->dev);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return hd;
+}
+EXPORT_SYMBOL_GPL(gb_hd_create);
+
+int gb_hd_add(struct gb_host_device *hd)
+{
+       int ret;
+
+       ret = device_add(&hd->dev);
+       if (ret)
+               return ret;
+
+       ret = gb_svc_add(hd->svc);
+       if (ret) {
+               device_del(&hd->dev);
+               return ret;
+       }
+
+       trace_gb_hd_add(hd);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_add);
+
+void gb_hd_del(struct gb_host_device *hd)
+{
+       trace_gb_hd_del(hd);
+
+       /*
+        * Tear down the svc and flush any on-going hotplug processing before
+        * removing the remaining interfaces.
+        */
+       gb_svc_del(hd->svc);
+
+       device_del(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_del);
+
+void gb_hd_shutdown(struct gb_host_device *hd)
+{
+       gb_svc_del(hd->svc);
+}
+EXPORT_SYMBOL_GPL(gb_hd_shutdown);
+
+void gb_hd_put(struct gb_host_device *hd)
+{
+       put_device(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_put);
+
+int __init gb_hd_init(void)
+{
+       ida_init(&gb_hd_bus_id_map);
+
+       return 0;
+}
+
+void gb_hd_exit(void)
+{
+       ida_destroy(&gb_hd_bus_id_map);
+}
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
new file mode 100644 (file)
index 0000000..c4250cf
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __HD_H
+#define __HD_H
+
+struct gb_host_device;
+struct gb_message;
+
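+/*
+ * Host-driver callbacks. Only message_send and message_cancel are
+ * mandatory (gb_hd_create() refuses drivers without them); the remaining
+ * operations are optional.
+ */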
+struct gb_hd_driver {
+       size_t  hd_priv_size;
+
+       int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
+                               unsigned long flags);
+       void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
+       int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
+                               unsigned long flags);
+       int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
+       int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
+       int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
+       int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
+                               u8 phase, unsigned int timeout);
+       int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
+                               size_t peer_space, unsigned int timeout);
+       int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
+
+       int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
+                       struct gb_message *message, gfp_t gfp_mask);
+       void (*message_cancel)(struct gb_message *message);
+       int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
+       int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
+       int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+                     bool async);
+       int (*timesync_enable)(struct gb_host_device *hd, u8 count,
+                              u64 frame_time, u32 strobe_delay, u32 refclk);
+       int (*timesync_disable)(struct gb_host_device *hd);
+       int (*timesync_authoritative)(struct gb_host_device *hd,
+                                     u64 *frame_time);
+       int (*timesync_get_last_event)(struct gb_host_device *hd,
+                                      u64 *frame_time);
+};
+
+struct gb_host_device {
+       struct device dev;
+       int bus_id;
+       const struct gb_hd_driver *driver;
+
+       struct list_head modules;
+       struct list_head connections;
+       struct ida cport_id_map;
+
+       /* Number of CPorts supported by the UniPro IP */
+       size_t num_cports;
+
+       /* Host device buffer constraints */
+       size_t buffer_size_max;
+
+       struct gb_svc *svc;
+       /* Private data for the host driver */
+       unsigned long hd_priv[0] __aligned(sizeof(s64));
+};
+#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+                                       unsigned long flags);
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
+
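+/*
+ * A host-controller driver typically calls gb_hd_create() followed by
+ * gb_hd_add() at probe time, and gb_hd_del() followed by gb_hd_put() on
+ * removal.
+ */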
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+                                       struct device *parent,
+                                       size_t buffer_size_max,
+                                       size_t num_cports);
+int gb_hd_add(struct gb_host_device *hd);
+void gb_hd_del(struct gb_host_device *hd);
+void gb_hd_shutdown(struct gb_host_device *hd);
+void gb_hd_put(struct gb_host_device *hd);
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+                bool async);
+
+int gb_hd_init(void);
+void gb_hd_exit(void);
+
+#endif /* __HD_H */
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
new file mode 100644 (file)
index 0000000..b558c81
--- /dev/null
@@ -0,0 +1,559 @@
+/*
+ * HID class driver for the Greybus.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+
+/* Greybus HID device's structure */
+struct gb_hid {
+       struct gb_bundle *bundle;
+       struct gb_connection            *connection;
+
+       struct hid_device               *hid;
+       struct gb_hid_desc_response     hdesc;
+
+       unsigned long                   flags;
+#define GB_HID_STARTED                 0x01
+#define GB_HID_READ_PENDING            0x04
+
+       unsigned int                    bufsize;
+       char                            *inbuf;
+};
+
+static DEFINE_MUTEX(gb_hid_open_mutex);
+
+/* Routines to get controller's information over greybus */
+
+/* Operations performed on greybus */
+static int gb_hid_get_desc(struct gb_hid *ghid)
+{
+       return gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_DESC, NULL,
+                                0, &ghid->hdesc, sizeof(ghid->hdesc));
+}
+
+static int gb_hid_get_report_desc(struct gb_hid *ghid, char *rdesc)
+{
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(ghid->bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT_DESC,
+                                NULL, 0, rdesc,
+                                le16_to_cpu(ghid->hdesc.wReportDescLength));
+
+       gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+       return ret;
+}
+
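+/* Send a power-on or power-off (GB_HID_TYPE_PWR_*) request to the module. */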
+static int gb_hid_set_power(struct gb_hid *ghid, int type)
+{
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(ghid->bundle);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(ghid->connection, type, NULL, 0, NULL, 0);
+
+       gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+       return ret;
+}
+
+static int gb_hid_get_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+                            unsigned char *buf, int len)
+{
+       struct gb_hid_get_report_request request;
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(ghid->bundle);
+       if (ret)
+               return ret;
+
+       request.report_type = report_type;
+       request.report_id = report_id;
+
+       ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT,
+                                &request, sizeof(request), buf, len);
+
+       gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+       return ret;
+}
+
+static int gb_hid_set_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+                            unsigned char *buf, int len)
+{
+       struct gb_hid_set_report_request *request;
+       struct gb_operation *operation;
+       int ret, size = sizeof(*request) + len - 1;
+
+       ret = gb_pm_runtime_get_sync(ghid->bundle);
+       if (ret)
+               return ret;
+
+       operation = gb_operation_create(ghid->connection,
+                                       GB_HID_TYPE_SET_REPORT, size, 0,
+                                       GFP_KERNEL);
+       if (!operation) {
+               gb_pm_runtime_put_autosuspend(ghid->bundle);
+               return -ENOMEM;
+       }
+
+       request = operation->request->payload;
+       request->report_type = report_type;
+       request->report_id = report_id;
+       memcpy(request->report, buf, len);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret) {
+               dev_err(&operation->connection->bundle->dev,
+                       "failed to set report: %d\n", ret);
+       } else {
+               ret = len;
+       }
+
+       gb_operation_put(operation);
+       gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+       return ret;
+}
+
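+/* Handle unsolicited IRQ_EVENT requests carrying input reports. */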
+static int gb_hid_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_hid *ghid = gb_connection_get_data(connection);
+       struct gb_hid_input_report_request *request = op->request->payload;
+
+       if (op->type != GB_HID_TYPE_IRQ_EVENT) {
+               dev_err(&connection->bundle->dev,
+                       "unsupported unsolicited request\n");
+               return -EINVAL;
+       }
+
+       if (test_bit(GB_HID_STARTED, &ghid->flags))
+               hid_input_report(ghid->hid, HID_INPUT_REPORT,
+                                request->report, op->request->payload_size, 1);
+
+       return 0;
+}
+
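+/* Report length in bytes: bit size rounded up, plus one byte if numbered. */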
+static int gb_hid_report_len(struct hid_report *report)
+{
+       return ((report->size - 1) >> 3) + 1 +
+               report->device->report_enum[report->type].numbered;
+}
+
+static void gb_hid_find_max_report(struct hid_device *hid, unsigned int type,
+                                  unsigned int *max)
+{
+       struct hid_report *report;
+       unsigned int size;
+
+       list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+               size = gb_hid_report_len(report);
+               if (*max < size)
+                       *max = size;
+       }
+}
+
+static void gb_hid_free_buffers(struct gb_hid *ghid)
+{
+       kfree(ghid->inbuf);
+       ghid->inbuf = NULL;
+       ghid->bufsize = 0;
+}
+
+static int gb_hid_alloc_buffers(struct gb_hid *ghid, size_t bufsize)
+{
+       ghid->inbuf = kzalloc(bufsize, GFP_KERNEL);
+       if (!ghid->inbuf)
+               return -ENOMEM;
+
+       ghid->bufsize = bufsize;
+
+       return 0;
+}
+
+/* Routines dealing with reports */
+static void gb_hid_init_report(struct gb_hid *ghid, struct hid_report *report)
+{
+       unsigned int size;
+
+       size = gb_hid_report_len(report);
+       if (gb_hid_get_report(ghid, report->type, report->id, ghid->inbuf,
+                             size))
+               return;
+
+       /*
+        * hid->driver_lock is held as we are in probe function,
+        * we just need to setup the input fields, so using
+        * hid_report_raw_event is safe.
+        */
+       hid_report_raw_event(ghid->hid, report->type, ghid->inbuf, size, 1);
+}
+
+static void gb_hid_init_reports(struct gb_hid *ghid)
+{
+       struct hid_device *hid = ghid->hid;
+       struct hid_report *report;
+
+       list_for_each_entry(report,
+               &hid->report_enum[HID_INPUT_REPORT].report_list, list)
+               gb_hid_init_report(ghid, report);
+
+       list_for_each_entry(report,
+               &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
+               gb_hid_init_report(ghid, report);
+}
+
+static int __gb_hid_get_raw_report(struct hid_device *hid,
+               unsigned char report_number, __u8 *buf, size_t count,
+               unsigned char report_type)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       int ret;
+
+       if (report_type == HID_OUTPUT_REPORT)
+               return -EINVAL;
+
+       ret = gb_hid_get_report(ghid, report_type, report_number, buf, count);
+       if (!ret)
+               ret = count;
+
+       return ret;
+}
+
+static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+                                     size_t len, unsigned char report_type)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       int report_id = buf[0];
+       int ret;
+
+       if (report_type == HID_INPUT_REPORT)
+               return -EINVAL;
+
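+       /* The report id is carried in the request itself; skip it in the data. */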
+       if (report_id) {
+               buf++;
+               len--;
+       }
+
+       ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
+       if (report_id && ret >= 0)
+               ret++; /* add report_id to the number of transferred bytes */
+
+       return ret;
+}
+
+static int gb_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+                              __u8 *buf, size_t len, unsigned char rtype,
+                              int reqtype)
+{
+       switch (reqtype) {
+       case HID_REQ_GET_REPORT:
+               return __gb_hid_get_raw_report(hid, reportnum, buf, len, rtype);
+       case HID_REQ_SET_REPORT:
+               if (buf[0] != reportnum)
+                       return -EINVAL;
+               return __gb_hid_output_raw_report(hid, buf, len, rtype);
+       default:
+               return -EIO;
+       }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+static int gb_hid_get_raw_report(struct hid_device *hid,
+                                  unsigned char reportnum, __u8 *buf,
+                                  size_t len, unsigned char rtype)
+{
+       return gb_hid_raw_request(hid, reportnum, buf, len, rtype,
+                                 HID_REQ_GET_REPORT);
+}
+
+static int gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+                                   size_t len, unsigned char rtype)
+{
+       return gb_hid_raw_request(hid, buf[0], buf, len, rtype,
+                                 HID_REQ_SET_REPORT);
+}
+#endif
+
+/* HID Callbacks */
+static int gb_hid_parse(struct hid_device *hid)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       unsigned int rsize;
+       char *rdesc;
+       int ret;
+
+       rsize = le16_to_cpu(ghid->hdesc.wReportDescLength);
+       if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+               dbg_hid("weird size of report descriptor (%u)\n", rsize);
+               return -EINVAL;
+       }
+
+       rdesc = kzalloc(rsize, GFP_KERNEL);
+       if (!rdesc) {
+               dbg_hid("couldn't allocate rdesc memory\n");
+               return -ENOMEM;
+       }
+
+       ret = gb_hid_get_report_desc(ghid, rdesc);
+       if (ret) {
+               hid_err(hid, "reading report descriptor failed\n");
+               goto free_rdesc;
+       }
+
+       ret = hid_parse_report(hid, rdesc, rsize);
+       if (ret)
+               dbg_hid("parsing report descriptor failed\n");
+
+free_rdesc:
+       kfree(rdesc);
+
+       return ret;
+}
+
+static int gb_hid_start(struct hid_device *hid)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+       int ret;
+
+       gb_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+       gb_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+       gb_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+       if (bufsize > HID_MAX_BUFFER_SIZE)
+               bufsize = HID_MAX_BUFFER_SIZE;
+
+       ret = gb_hid_alloc_buffers(ghid, bufsize);
+       if (ret)
+               return ret;
+
+       if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
+               gb_hid_init_reports(ghid);
+
+       return 0;
+}
+
+static void gb_hid_stop(struct hid_device *hid)
+{
+       struct gb_hid *ghid = hid->driver_data;
+
+       gb_hid_free_buffers(ghid);
+}
+
+static int gb_hid_open(struct hid_device *hid)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       int ret = 0;
+
+       mutex_lock(&gb_hid_open_mutex);
+       if (!hid->open++) {
+               ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+               if (ret < 0)
+                       hid->open--;
+               else
+                       set_bit(GB_HID_STARTED, &ghid->flags);
+       }
+       mutex_unlock(&gb_hid_open_mutex);
+
+       return ret;
+}
+
+static void gb_hid_close(struct hid_device *hid)
+{
+       struct gb_hid *ghid = hid->driver_data;
+       int ret;
+
+       /*
+        * Protect hid->open to make sure we don't restart data acquisition
+        * due to a resumption we no longer care about.
+        */
+       mutex_lock(&gb_hid_open_mutex);
+       if (!--hid->open) {
+               clear_bit(GB_HID_STARTED, &ghid->flags);
+
+               /* Save some power */
+               ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+               if (ret)
+                       dev_err(&ghid->connection->bundle->dev,
+                               "failed to power off (%d)\n", ret);
+       }
+       mutex_unlock(&gb_hid_open_mutex);
+}
+
+static int gb_hid_power(struct hid_device *hid, int lvl)
+{
+       struct gb_hid *ghid = hid->driver_data;
+
+       switch (lvl) {
+       case PM_HINT_FULLON:
+               return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+       case PM_HINT_NORMAL:
+               return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+       }
+
+       return 0;
+}
+
+/* HID structure to pass callbacks */
+static struct hid_ll_driver gb_hid_ll_driver = {
+       .parse = gb_hid_parse,
+       .start = gb_hid_start,
+       .stop = gb_hid_stop,
+       .open = gb_hid_open,
+       .close = gb_hid_close,
+       .power = gb_hid_power,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)
+       .raw_request = gb_hid_raw_request,
+#endif
+};
+
+static int gb_hid_init(struct gb_hid *ghid)
+{
+       struct hid_device *hid = ghid->hid;
+       int ret;
+
+       ret = gb_hid_get_desc(ghid);
+       if (ret)
+               return ret;
+
+       hid->version = le16_to_cpu(ghid->hdesc.bcdHID);
+       hid->vendor = le16_to_cpu(ghid->hdesc.wVendorID);
+       hid->product = le16_to_cpu(ghid->hdesc.wProductID);
+       hid->country = ghid->hdesc.bCountryCode;
+
+       hid->driver_data = ghid;
+       hid->ll_driver = &gb_hid_ll_driver;
+       hid->dev.parent = &ghid->connection->bundle->dev;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+       hid->hid_get_raw_report = gb_hid_get_raw_report;
+       hid->hid_output_raw_report = gb_hid_output_raw_report;
+#endif
+//     hid->bus = BUS_GREYBUS; /* Need a bustype for GREYBUS in <linux/input.h> */
+
+       /* Set HID device's name */
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                dev_name(&ghid->connection->bundle->dev),
+                hid->vendor, hid->product);
+
+       return 0;
+}
+
+static int gb_hid_probe(struct gb_bundle *bundle,
+                       const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct hid_device *hid;
+       struct gb_hid *ghid;
+       int ret;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_HID)
+               return -ENODEV;
+
+       ghid = kzalloc(sizeof(*ghid), GFP_KERNEL);
+       if (!ghid)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                               gb_hid_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto err_free_ghid;
+       }
+
+       gb_connection_set_data(connection, ghid);
+       ghid->connection = connection;
+
+       hid = hid_allocate_device();
+       if (IS_ERR(hid)) {
+               ret = PTR_ERR(hid);
+               goto err_connection_destroy;
+       }
+
+       ghid->hid = hid;
+       ghid->bundle = bundle;
+
+       greybus_set_drvdata(bundle, ghid);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto err_destroy_hid;
+
+       ret = gb_hid_init(ghid);
+       if (ret)
+               goto err_connection_disable;
+
+       ret = hid_add_device(hid);
+       if (ret) {
+               hid_err(hid, "can't add hid device: %d\n", ret);
+               goto err_connection_disable;
+       }
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+err_connection_disable:
+       gb_connection_disable(connection);
+err_destroy_hid:
+       hid_destroy_device(hid);
+err_connection_destroy:
+       gb_connection_destroy(connection);
+err_free_ghid:
+       kfree(ghid);
+
+       return ret;
+}
+
+static void gb_hid_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_hid *ghid = greybus_get_drvdata(bundle);
+
+       if (gb_pm_runtime_get_sync(bundle))
+               gb_pm_runtime_get_noresume(bundle);
+
+       hid_destroy_device(ghid->hid);
+       gb_connection_disable(ghid->connection);
+       gb_connection_destroy(ghid->connection);
+       kfree(ghid);
+}
+
+static const struct greybus_bundle_id gb_hid_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_hid_id_table);
+
+static struct greybus_driver gb_hid_driver = {
+       .name           = "hid",
+       .probe          = gb_hid_probe,
+       .disconnect     = gb_hid_disconnect,
+       .id_table       = gb_hid_id_table,
+};
+module_greybus_driver(gb_hid_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
new file mode 100644 (file)
index 0000000..c2a5008
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * I2C bridge driver for the Greybus "generic" I2C module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_i2c_device {
+       struct gb_connection    *connection;
+       struct gbphy_device     *gbphy_dev;
+
+       u32                     functionality;
+
+       struct i2c_adapter      adapter;
+};
+
+/*
+ * Map Greybus i2c functionality bits into Linux ones
+ */
+static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
+{
+       return gb_i2c_functionality;    /* All bits the same for now */
+}
+
+static int gb_i2c_functionality_operation(struct gb_i2c_device *gb_i2c_dev)
+{
+       struct gb_i2c_functionality_response response;
+       u32 functionality;
+       int ret;
+
+       ret = gb_operation_sync(gb_i2c_dev->connection,
+                               GB_I2C_TYPE_FUNCTIONALITY,
+                               NULL, 0, &response, sizeof(response));
+       if (ret)
+               return ret;
+
+       functionality = le32_to_cpu(response.functionality);
+       gb_i2c_dev->functionality = gb_i2c_functionality_map(functionality);
+
+       return 0;
+}
+
+/*
+ * Map Linux i2c_msg flags into Greybus i2c transfer op flags.
+ */
+static u16 gb_i2c_transfer_op_flags_map(u16 flags)
+{
+       return flags;   /* All flags the same for now */
+}
+
+static void
+gb_i2c_fill_transfer_op(struct gb_i2c_transfer_op *op, struct i2c_msg *msg)
+{
+       u16 flags = gb_i2c_transfer_op_flags_map(msg->flags);
+
+       op->addr = cpu_to_le16(msg->addr);
+       op->flags = cpu_to_le16(flags);
+       op->size = cpu_to_le16(msg->len);
+}
+
+static struct gb_operation *
+gb_i2c_operation_create(struct gb_connection *connection,
+                       struct i2c_msg *msgs, u32 msg_count)
+{
+       struct gb_i2c_device *gb_i2c_dev = gb_connection_get_data(connection);
+       struct gb_i2c_transfer_request *request;
+       struct gb_operation *operation;
+       struct gb_i2c_transfer_op *op;
+       struct i2c_msg *msg;
+       u32 data_out_size = 0;
+       u32 data_in_size = 0;
+       size_t request_size;
+       void *data;
+       u16 op_count;
+       u32 i;
+
+       if (msg_count > (u32)U16_MAX) {
+               dev_err(&gb_i2c_dev->gbphy_dev->dev, "msg_count (%u) too big\n",
+                       msg_count);
+               return NULL;
+       }
+       op_count = (u16)msg_count;
+
+       /*
+        * In addition to space for all message descriptors we need
+        * to have enough to hold all outbound message data.
+        */
+       msg = msgs;
+       for (i = 0; i < msg_count; i++, msg++)
+               if (msg->flags & I2C_M_RD)
+                       data_in_size += (u32)msg->len;
+               else
+                       data_out_size += (u32)msg->len;
+
+       request_size = sizeof(*request);
+       request_size += msg_count * sizeof(*op);
+       request_size += data_out_size;
+
+       /* Response consists only of incoming data */
+       operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,
+                               request_size, data_in_size, GFP_KERNEL);
+       if (!operation)
+               return NULL;
+
+       request = operation->request->payload;
+       request->op_count = cpu_to_le16(op_count);
+       /* Fill in the ops array */
+       op = &request->ops[0];
+       msg = msgs;
+       for (i = 0; i < msg_count; i++)
+               gb_i2c_fill_transfer_op(op++, msg++);
+
+       if (!data_out_size)
+               return operation;
+
+       /* Copy over the outgoing data; it starts after the last op */
+       data = op;
+       msg = msgs;
+       for (i = 0; i < msg_count; i++) {
+               if (!(msg->flags & I2C_M_RD)) {
+                       memcpy(data, msg->buf, msg->len);
+                       data += msg->len;
+               }
+               msg++;
+       }
+
+       return operation;
+}
+
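+/* Copy data returned for read messages from the response into the i2c buffers. */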
+static void gb_i2c_decode_response(struct i2c_msg *msgs, u32 msg_count,
+                               struct gb_i2c_transfer_response *response)
+{
+       struct i2c_msg *msg = msgs;
+       u8 *data;
+       u32 i;
+
+       if (!response)
+               return;
+       data = response->data;
+       for (i = 0; i < msg_count; i++) {
+               if (msg->flags & I2C_M_RD) {
+                       memcpy(msg->buf, data, msg->len);
+                       data += msg->len;
+               }
+               msg++;
+       }
+}
+
+/*
+ * Some i2c transfer operation errors are expected (e.g. -EAGAIN, -ENODEV)
+ * and need not be logged as failures.
+ */
+static bool gb_i2c_expected_transfer_error(int errno)
+{
+       return errno == -EAGAIN || errno == -ENODEV;
+}
+
+static int gb_i2c_transfer_operation(struct gb_i2c_device *gb_i2c_dev,
+                                       struct i2c_msg *msgs, u32 msg_count)
+{
+       struct gb_connection *connection = gb_i2c_dev->connection;
+       struct device *dev = &gb_i2c_dev->gbphy_dev->dev;
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_i2c_operation_create(connection, msgs, msg_count);
+       if (!operation)
+               return -ENOMEM;
+
+       ret = gbphy_runtime_get_sync(gb_i2c_dev->gbphy_dev);
+       if (ret)
+               goto exit_operation_put;
+
+       ret = gb_operation_request_send_sync(operation);
+       if (!ret) {
+               struct gb_i2c_transfer_response *response;
+
+               response = operation->response->payload;
+               gb_i2c_decode_response(msgs, msg_count, response);
+               ret = msg_count;
+       } else if (!gb_i2c_expected_transfer_error(ret)) {
+               dev_err(dev, "transfer operation failed (%d)\n", ret);
+       }
+
+       gbphy_runtime_put_autosuspend(gb_i2c_dev->gbphy_dev);
+
+exit_operation_put:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+               int msg_count)
+{
+       struct gb_i2c_device *gb_i2c_dev;
+
+       gb_i2c_dev = i2c_get_adapdata(adap);
+
+       return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count);
+}
+
+#if 0
+/* Later */
+static int gb_i2c_smbus_xfer(struct i2c_adapter *adap,
+                       u16 addr, unsigned short flags, char read_write,
+                       u8 command, int size, union i2c_smbus_data *data)
+{
+       struct gb_i2c_device *gb_i2c_dev;
+
+       gb_i2c_dev = i2c_get_adapdata(adap);
+
+       return 0;
+}
+#endif
+
+static u32 gb_i2c_functionality(struct i2c_adapter *adap)
+{
+       struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
+
+       return gb_i2c_dev->functionality;
+}
+
+static const struct i2c_algorithm gb_i2c_algorithm = {
+       .master_xfer    = gb_i2c_master_xfer,
+       /* .smbus_xfer  = gb_i2c_smbus_xfer, */
+       .functionality  = gb_i2c_functionality,
+};
+
+/*
+ * Do initial setup of the i2c device.  This includes verifying we
+ * can support it (based on the protocol version it advertises).
+ * If that's OK, we get and cache its functionality bits.
+ *
+ * Note: gb_i2c_dev->connection is assumed to be valid here.
+ */
+static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
+{
+       /* Assume the functionality never changes, just get it once */
+       return gb_i2c_functionality_operation(gb_i2c_dev);
+}
+
+static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
+                        const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       struct gb_i2c_device *gb_i2c_dev;
+       struct i2c_adapter *adapter;
+       int ret;
+
+       gb_i2c_dev = kzalloc(sizeof(*gb_i2c_dev), GFP_KERNEL);
+       if (!gb_i2c_dev)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         NULL);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto exit_i2cdev_free;
+       }
+
+       gb_i2c_dev->connection = connection;
+       gb_connection_set_data(connection, gb_i2c_dev);
+       gb_i2c_dev->gbphy_dev = gbphy_dev;
+       gb_gbphy_set_data(gbphy_dev, gb_i2c_dev);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto exit_connection_destroy;
+
+       ret = gb_i2c_device_setup(gb_i2c_dev);
+       if (ret)
+               goto exit_connection_disable;
+
+       /* Looks good; up our i2c adapter */
+       adapter = &gb_i2c_dev->adapter;
+       adapter->owner = THIS_MODULE;
+       adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adapter->algo = &gb_i2c_algorithm;
+       /* adapter->algo_data = what? */
+
+       adapter->dev.parent = &gbphy_dev->dev;
+       snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
+       i2c_set_adapdata(adapter, gb_i2c_dev);
+
+       ret = i2c_add_adapter(adapter);
+       if (ret)
+               goto exit_connection_disable;
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+       return 0;
+
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_i2cdev_free:
+       kfree(gb_i2c_dev);
+
+       return ret;
+}
+
+static void gb_i2c_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_i2c_device *gb_i2c_dev = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = gb_i2c_dev->connection;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       i2c_del_adapter(&gb_i2c_dev->adapter);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+       kfree(gb_i2c_dev);
+}
+
+static const struct gbphy_device_id gb_i2c_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_I2C) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_i2c_id_table);
+
+static struct gbphy_driver i2c_driver = {
+       .name           = "i2c",
+       .probe          = gb_i2c_probe,
+       .remove         = gb_i2c_remove,
+       .id_table       = gb_i2c_id_table,
+};
+
+module_gbphy_driver(i2c_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
new file mode 100644 (file)
index 0000000..c3ed3d7
--- /dev/null
@@ -0,0 +1,1316 @@
+/*
+ * Greybus interface code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/delay.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_INTERFACE_MODE_SWITCH_TIMEOUT       2000
+
+#define GB_INTERFACE_DEVICE_ID_BAD     0xff
+
+#define GB_INTERFACE_AUTOSUSPEND_MS                    3000
+
+/* Time required for interface to enter standby before disabling REFCLK */
+#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS                        20
+
+/* Don't-care selector index */
+#define DME_SELECTOR_INDEX_NULL                0
+
+/* DME attributes */
+/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
+#define DME_T_TST_SRC_INCREMENT                0x4083
+
+#define DME_DDBL1_MANUFACTURERID       0x5003
+#define DME_DDBL1_PRODUCTID            0x5004
+
+#define DME_TOSHIBA_GMP_VID            0x6000
+#define DME_TOSHIBA_GMP_PID            0x6001
+#define DME_TOSHIBA_GMP_SN0            0x6002
+#define DME_TOSHIBA_GMP_SN1            0x6003
+#define DME_TOSHIBA_GMP_INIT_STATUS    0x6101
+
+/* DDBL1 Manufacturer and Product ids */
+#define TOSHIBA_DMID                   0x0126
+#define TOSHIBA_ES2_BRIDGE_DPID                0x1000
+#define TOSHIBA_ES3_APBRIDGE_DPID      0x1001
+#define TOSHIBA_ES3_GBPHY_DPID         0x1002
+
+static int gb_interface_hibernate_link(struct gb_interface *intf);
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
+
+static int gb_interface_dme_attr_get(struct gb_interface *intf,
+                                                       u16 attr, u32 *val)
+{
+       return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
+                                       attr, DME_SELECTOR_INDEX_NULL, val);
+}
+
+static int gb_interface_read_ara_dme(struct gb_interface *intf)
+{
+       u32 sn0, sn1;
+       int ret;
+
+       /*
+        * Unless this is a Toshiba bridge, bail out until we have defined
+        * standard GMP attributes.
+        */
+       if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
+               dev_err(&intf->dev, "unknown manufacturer %08x\n",
+                               intf->ddbl1_manufacturer_id);
+               return -ENODEV;
+       }
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
+                                       &intf->vendor_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
+                                       &intf->product_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
+       if (ret)
+               return ret;
+
+       intf->serial_number = (u64)sn1 << 32 | sn0;
+
+       return 0;
+}
+
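+/* Read and cache the interface's DDBL1 ids and GMP DME attributes. */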
+static int gb_interface_read_dme(struct gb_interface *intf)
+{
+       int ret;
+
+       /* DME attributes have already been read */
+       if (intf->dme_read)
+               return 0;
+
+       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
+                                       &intf->ddbl1_manufacturer_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
+                                       &intf->ddbl1_product_id);
+       if (ret)
+               return ret;
+
+       if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
+                       intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
+               intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
+               intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
+       }
+
+       ret = gb_interface_read_ara_dme(intf);
+       if (ret)
+               return ret;
+
+       intf->dme_read = true;
+
+       return 0;
+}
+
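+/* Allocate a device id for the interface and create a route to it from the AP. */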
+static int gb_interface_route_create(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       u8 intf_id = intf->interface_id;
+       u8 device_id;
+       int ret;
+
+       /* Allocate an interface device id. */
+       ret = ida_simple_get(&svc->device_id_map,
+                            GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
+                            GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
+               return ret;
+       }
+       device_id = ret;
+
+       ret = gb_svc_intf_device_id(svc, intf_id, device_id);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set device id %u: %d\n",
+                               device_id, ret);
+               goto err_ida_remove;
+       }
+
+       /* FIXME: Hard-coded AP device id. */
+       ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
+                                 intf_id, device_id);
+       if (ret) {
+               dev_err(&intf->dev, "failed to create route: %d\n", ret);
+               goto err_svc_id_free;
+       }
+
+       intf->device_id = device_id;
+
+       return 0;
+
+err_svc_id_free:
+       /*
+        * XXX Should we tell SVC that this id doesn't belong to interface
+        * XXX anymore.
+        */
+err_ida_remove:
+       ida_simple_remove(&svc->device_id_map, device_id);
+
+       return ret;
+}
+
+static void gb_interface_route_destroy(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+
+       if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
+               return;
+
+       gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
+       ida_simple_remove(&svc->device_id_map, intf->device_id);
+       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+}
+
+/* Locking: Caller holds the interface mutex. */
+static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
+{
+       int ret;
+
+       dev_info(&intf->dev, "legacy mode switch detected\n");
+
+       /* Mark as disconnected to prevent I/O during disable. */
+       intf->disconnected = true;
+       gb_interface_disable(intf);
+       intf->disconnected = false;
+
+       ret = gb_interface_enable(intf);
+       if (ret) {
+               dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
+               gb_interface_deactivate(intf);
+       }
+
+       return ret;
+}
+
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+                                                               u32 mailbox)
+{
+       mutex_lock(&intf->mutex);
+
+       if (result) {
+               dev_warn(&intf->dev,
+                               "mailbox event with UniPro error: 0x%04x\n",
+                               result);
+               goto err_disable;
+       }
+
+       if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
+               dev_warn(&intf->dev,
+                               "mailbox event with unexpected value: 0x%08x\n",
+                               mailbox);
+               goto err_disable;
+       }
+
+       if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
+               gb_interface_legacy_mode_switch(intf);
+               goto out_unlock;
+       }
+
+       if (!intf->mode_switch) {
+               dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
+                               mailbox);
+               goto err_disable;
+       }
+
+       dev_info(&intf->dev, "mode switch detected\n");
+
+       complete(&intf->mode_switch_completion);
+
+out_unlock:
+       mutex_unlock(&intf->mutex);
+
+       return;
+
+err_disable:
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_interface_mode_switch_work(struct work_struct *work)
+{
+       struct gb_interface *intf;
+       struct gb_control *control;
+       unsigned long timeout;
+       int ret;
+
+       intf = container_of(work, struct gb_interface, mode_switch_work);
+
+       mutex_lock(&intf->mutex);
+       /* Make sure interface is still enabled. */
+       if (!intf->enabled) {
+               dev_dbg(&intf->dev, "mode switch aborted\n");
+               intf->mode_switch = false;
+               mutex_unlock(&intf->mutex);
+               goto out_interface_put;
+       }
+
+       /*
+        * Prepare the control device for mode switch and make sure to get an
+        * extra reference before it goes away during interface disable.
+        */
+       control = gb_control_get(intf->control);
+       gb_control_mode_switch_prepare(control);
+       gb_interface_disable(intf);
+       mutex_unlock(&intf->mutex);
+
+       timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
+       ret = wait_for_completion_interruptible_timeout(
+                       &intf->mode_switch_completion, timeout);
+
+       /* Finalise control-connection mode switch. */
+       gb_control_mode_switch_complete(control);
+       gb_control_put(control);
+
+       if (ret < 0) {
+               dev_err(&intf->dev, "mode switch interrupted\n");
+               goto err_deactivate;
+       } else if (ret == 0) {
+               dev_err(&intf->dev, "mode switch timed out\n");
+               goto err_deactivate;
+       }
+
+       /* Re-enable (re-enumerate) interface if still active. */
+       mutex_lock(&intf->mutex);
+       intf->mode_switch = false;
+       if (intf->active) {
+               ret = gb_interface_enable(intf);
+               if (ret) {
+                       dev_err(&intf->dev, "failed to re-enable interface: %d\n",
+                                       ret);
+                       gb_interface_deactivate(intf);
+               }
+       }
+       mutex_unlock(&intf->mutex);
+
+out_interface_put:
+       gb_interface_put(intf);
+
+       return;
+
+err_deactivate:
+       mutex_lock(&intf->mutex);
+       intf->mode_switch = false;
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+
+       gb_interface_put(intf);
+}
+
+int gb_interface_request_mode_switch(struct gb_interface *intf)
+{
+       int ret = 0;
+
+       mutex_lock(&intf->mutex);
+       if (intf->mode_switch) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
+       intf->mode_switch = true;
+       reinit_completion(&intf->mode_switch_completion);
+
+       /*
+        * Get a reference to the interface device, which will be put once the
+        * mode switch is complete.
+        */
+       get_device(&intf->dev);
+
+       if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
+               put_device(&intf->dev);
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&intf->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
+
+/*
+ * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
+ * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to read and
+ * clear it after reading a non-zero value from it.
+ *
+ * FIXME: This is module-hardware dependent and needs to be extended for every
+ * type of module we want to support.
+ */
+static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
+{
+       struct gb_host_device *hd = intf->hd;
+       unsigned long bootrom_quirks;
+       unsigned long s2l_quirks;
+       int ret;
+       u32 value;
+       u16 attr;
+       u8 init_status;
+
+       /*
+        * ES2 bridges use T_TstSrcIncrement for the init status.
+        *
+        * FIXME: Remove ES2 support
+        */
+       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+               attr = DME_T_TST_SRC_INCREMENT;
+       else
+               attr = DME_TOSHIBA_GMP_INIT_STATUS;
+
+       ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
+                                 DME_SELECTOR_INDEX_NULL, &value);
+       if (ret)
+               return ret;
+
+       /*
+        * A nonzero init status indicates the module has finished
+        * initializing.
+        */
+       if (!value) {
+               dev_err(&intf->dev, "invalid init status\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Extract the init status.
+        *
+        * For ES2: We need to check lowest 8 bits of 'value'.
+        * For ES3: We need to check highest 8 bits out of 32 of 'value'.
+        *
+        * FIXME: Remove ES2 support
+        */
+       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+               init_status = value & 0xff;
+       else
+               init_status = value >> 24;
+
+       /*
+        * Check if the interface is executing the quirky ES3 bootrom that,
+        * for example, requires E2EFC, CSD and CSV to be disabled.
+        */
+       bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
+                               GB_INTERFACE_QUIRK_FORCED_DISABLE |
+                               GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
+                               GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
+
+       s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
+
+       switch (init_status) {
+       case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
+       case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
+               intf->quirks |= bootrom_quirks;
+               break;
+       case GB_INIT_S2_LOADER_BOOT_STARTED:
+               /* S2 Loader doesn't support runtime PM */
+               intf->quirks &= ~bootrom_quirks;
+               intf->quirks |= s2l_quirks;
+               break;
+       default:
+               intf->quirks &= ~bootrom_quirks;
+               intf->quirks &= ~s2l_quirks;
+       }
+
+       /* Clear the init status. */
+       return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
+                                  DME_SELECTOR_INDEX_NULL, 0);
+}
+
+/* interface sysfs attributes */
+#define gb_interface_attr(field, type)                                 \
+static ssize_t field##_show(struct device *dev,                                \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_interface *intf = to_gb_interface(dev);               \
+       return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);        \
+}                                                                      \
+static DEVICE_ATTR_RO(field)
+
+gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
+gb_interface_attr(ddbl1_product_id, "0x%08x");
+gb_interface_attr(interface_id, "%u");
+gb_interface_attr(vendor_id, "0x%08x");
+gb_interface_attr(product_id, "0x%08x");
+gb_interface_attr(serial_number, "0x%016llx");
+
+static ssize_t voltage_now_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_VOL,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(voltage_now);
+
+static ssize_t current_now_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_CURR,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(current_now);
+
+static ssize_t power_now_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_PWR,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(power_now);
+
+static ssize_t power_state_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       if (intf->active)
+               return scnprintf(buf, PAGE_SIZE, "on\n");
+       else
+               return scnprintf(buf, PAGE_SIZE, "off\n");
+}
+
+static ssize_t power_state_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t len)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       bool activate;
+       int ret = 0;
+
+       if (kstrtobool(buf, &activate))
+               return -EINVAL;
+
+       mutex_lock(&intf->mutex);
+
+       if (activate == intf->active)
+               goto unlock;
+
+       if (activate) {
+               ret = gb_interface_activate(intf);
+               if (ret) {
+                       dev_err(&intf->dev,
+                               "failed to activate interface: %d\n", ret);
+                       goto unlock;
+               }
+
+               ret = gb_interface_enable(intf);
+               if (ret) {
+                       dev_err(&intf->dev,
+                               "failed to enable interface: %d\n", ret);
+                       gb_interface_deactivate(intf);
+                       goto unlock;
+               }
+       } else {
+               gb_interface_disable(intf);
+               gb_interface_deactivate(intf);
+       }
+
+unlock:
+       mutex_unlock(&intf->mutex);
+
+       if (ret)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_RW(power_state);
+
+static const char *gb_interface_type_string(struct gb_interface *intf)
+{
+       static const char * const types[] = {
+               [GB_INTERFACE_TYPE_INVALID] = "invalid",
+               [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
+               [GB_INTERFACE_TYPE_DUMMY] = "dummy",
+               [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
+               [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
+       };
+
+       return types[intf->type];
+}
+
+static ssize_t interface_type_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       return sprintf(buf, "%s\n", gb_interface_type_string(intf));
+}
+static DEVICE_ATTR_RO(interface_type);
+
+static struct attribute *interface_unipro_attrs[] = {
+       &dev_attr_ddbl1_manufacturer_id.attr,
+       &dev_attr_ddbl1_product_id.attr,
+       NULL
+};
+
+static struct attribute *interface_greybus_attrs[] = {
+       &dev_attr_vendor_id.attr,
+       &dev_attr_product_id.attr,
+       &dev_attr_serial_number.attr,
+       NULL
+};
+
+static struct attribute *interface_power_attrs[] = {
+       &dev_attr_voltage_now.attr,
+       &dev_attr_current_now.attr,
+       &dev_attr_power_now.attr,
+       &dev_attr_power_state.attr,
+       NULL
+};
+
+static struct attribute *interface_common_attrs[] = {
+       &dev_attr_interface_id.attr,
+       &dev_attr_interface_type.attr,
+       NULL
+};
+
+static umode_t interface_unipro_is_visible(struct kobject *kobj,
+                                               struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_UNIPRO:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static umode_t interface_greybus_is_visible(struct kobject *kobj,
+                                               struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static umode_t interface_power_is_visible(struct kobject *kobj,
+                                               struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_UNIPRO:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static const struct attribute_group interface_unipro_group = {
+       .is_visible     = interface_unipro_is_visible,
+       .attrs          = interface_unipro_attrs,
+};
+
+static const struct attribute_group interface_greybus_group = {
+       .is_visible     = interface_greybus_is_visible,
+       .attrs          = interface_greybus_attrs,
+};
+
+static const struct attribute_group interface_power_group = {
+       .is_visible     = interface_power_is_visible,
+       .attrs          = interface_power_attrs,
+};
+
+static const struct attribute_group interface_common_group = {
+       .attrs          = interface_common_attrs,
+};
+
+static const struct attribute_group *interface_groups[] = {
+       &interface_unipro_group,
+       &interface_greybus_group,
+       &interface_power_group,
+       &interface_common_group,
+       NULL
+};
+
+static void gb_interface_release(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       trace_gb_interface_release(intf);
+
+       kfree(intf);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int gb_interface_suspend(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret, timesync_ret;
+
+       ret = gb_control_interface_suspend_prepare(intf->control);
+       if (ret)
+               return ret;
+
+       gb_timesync_interface_remove(intf);
+
+       ret = gb_control_suspend(intf->control);
+       if (ret)
+               goto err_hibernate_abort;
+
+       ret = gb_interface_hibernate_link(intf);
+       if (ret)
+               return ret;
+
+       /* Delay to allow interface to enter standby before disabling refclk */
+       msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
+
+       ret = gb_interface_refclk_set(intf, false);
+       if (ret)
+               return ret;
+
+       return 0;
+
+err_hibernate_abort:
+       gb_control_interface_hibernate_abort(intf->control);
+
+       timesync_ret = gb_timesync_interface_add(intf);
+       if (timesync_ret) {
+               dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
+               return timesync_ret;
+       }
+
+       return ret;
+}
+
+static int gb_interface_resume(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       ret = gb_interface_refclk_set(intf, true);
+       if (ret)
+               return ret;
+
+       ret = gb_svc_intf_resume(svc, intf->interface_id);
+       if (ret)
+               return ret;
+
+       ret = gb_control_resume(intf->control);
+       if (ret)
+               return ret;
+
+       ret = gb_timesync_interface_add(intf);
+       if (ret) {
+               dev_err(dev, "failed to add to timesync: %d\n", ret);
+               return ret;
+       }
+
+       ret = gb_timesync_schedule_synchronous(intf);
+       if (ret) {
+               dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_runtime_idle(struct device *dev)
+{
+       pm_runtime_mark_last_busy(dev);
+       pm_request_autosuspend(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_interface_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
+                          gb_interface_runtime_idle)
+};
+
+struct device_type greybus_interface_type = {
+       .name =         "greybus_interface",
+       .release =      gb_interface_release,
+       .pm =           &gb_interface_pm_ops,
+};
+
+/*
+ * A Greybus module represents a user-replaceable component on a GMP
+ * phone.  An interface is the physical connection on that module.  A
+ * module may have more than one interface.
+ *
+ * Create a gb_interface structure to represent a discovered interface.
+ * The position of the interface within the Endo is encoded in the
+ * "interface_id" argument.
+ *
+ * Returns a pointer to the new interface or a null pointer if a
+ * failure occurs due to memory exhaustion.
+ */
+struct gb_interface *gb_interface_create(struct gb_module *module,
+                                        u8 interface_id)
+{
+       struct gb_host_device *hd = module->hd;
+       struct gb_interface *intf;
+
+       intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+       if (!intf)
+               return NULL;
+
+       intf->hd = hd;          /* XXX refcount? */
+       intf->module = module;
+       intf->interface_id = interface_id;
+       INIT_LIST_HEAD(&intf->bundles);
+       INIT_LIST_HEAD(&intf->manifest_descs);
+       mutex_init(&intf->mutex);
+       INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
+       init_completion(&intf->mode_switch_completion);
+
+       /* Invalid device id to start with */
+       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+
+       intf->dev.parent = &module->dev;
+       intf->dev.bus = &greybus_bus_type;
+       intf->dev.type = &greybus_interface_type;
+       intf->dev.groups = interface_groups;
+       intf->dev.dma_mask = module->dev.dma_mask;
+       device_initialize(&intf->dev);
+       dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
+                       interface_id);
+
+       pm_runtime_set_autosuspend_delay(&intf->dev,
+                                        GB_INTERFACE_AUTOSUSPEND_MS);
+
+       trace_gb_interface_create(intf);
+
+       return intf;
+}
+
+static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_activate_operation(struct gb_interface *intf,
+                                          enum gb_interface_type *intf_type)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       u8 type;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s\n", __func__);
+
+       ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
+       if (ret) {
+               dev_err(&intf->dev, "failed to activate: %d\n", ret);
+               return ret;
+       }
+
+       switch (type) {
+       case GB_SVC_INTF_TYPE_DUMMY:
+               *intf_type = GB_INTERFACE_TYPE_DUMMY;
+               /* FIXME: handle as an error for now */
+               return -ENODEV;
+       case GB_SVC_INTF_TYPE_UNIPRO:
+               *intf_type = GB_INTERFACE_TYPE_UNIPRO;
+               dev_err(&intf->dev, "interface type UniPro not supported\n");
+               /* FIXME: handle as an error for now */
+               return -ENODEV;
+       case GB_SVC_INTF_TYPE_GREYBUS:
+               *intf_type = GB_INTERFACE_TYPE_GREYBUS;
+               break;
+       default:
+               dev_err(&intf->dev, "unknown interface type: %u\n", type);
+               *intf_type = GB_INTERFACE_TYPE_UNKNOWN;
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int gb_interface_hibernate_link(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+
+       return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
+}
+
+static int _gb_interface_activate(struct gb_interface *intf,
+                                 enum gb_interface_type *type)
+{
+       int ret;
+
+       *type = GB_INTERFACE_TYPE_UNKNOWN;
+
+       if (intf->ejected || intf->removed)
+               return -ENODEV;
+
+       ret = gb_interface_vsys_set(intf, true);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_refclk_set(intf, true);
+       if (ret)
+               goto err_vsys_disable;
+
+       ret = gb_interface_unipro_set(intf, true);
+       if (ret)
+               goto err_refclk_disable;
+
+       ret = gb_interface_activate_operation(intf, type);
+       if (ret) {
+               switch (*type) {
+               case GB_INTERFACE_TYPE_UNIPRO:
+               case GB_INTERFACE_TYPE_GREYBUS:
+                       goto err_hibernate_link;
+               default:
+                       goto err_unipro_disable;
+               }
+       }
+
+       ret = gb_interface_read_dme(intf);
+       if (ret)
+               goto err_hibernate_link;
+
+       ret = gb_interface_route_create(intf);
+       if (ret)
+               goto err_hibernate_link;
+
+       intf->active = true;
+
+       trace_gb_interface_activate(intf);
+
+       return 0;
+
+err_hibernate_link:
+       gb_interface_hibernate_link(intf);
+err_unipro_disable:
+       gb_interface_unipro_set(intf, false);
+err_refclk_disable:
+       gb_interface_refclk_set(intf, false);
+err_vsys_disable:
+       gb_interface_vsys_set(intf, false);
+
+       return ret;
+}
+
+/*
+ * At present, we assume a UniPro-only module to be a Greybus module that
+ * failed to send its mailbox poke. There is some reason to believe that this
+ * is because of a bug in the ES3 bootrom.
+ *
+ * FIXME: Check if this is a Toshiba bridge before retrying?
+ */
+static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
+                                          enum gb_interface_type *type)
+{
+       int retries = 3;
+       int ret;
+
+       while (retries--) {
+               ret = _gb_interface_activate(intf, type);
+               if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
+                       continue;
+
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Activate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_activate(struct gb_interface *intf)
+{
+       enum gb_interface_type type;
+       int ret;
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_INVALID:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               ret = _gb_interface_activate_es3_hack(intf, &type);
+               break;
+       default:
+               ret = _gb_interface_activate(intf, &type);
+       }
+
+       /* Make sure type is detected correctly during reactivation. */
+       if (intf->type != GB_INTERFACE_TYPE_INVALID) {
+               if (type != intf->type) {
+                       dev_err(&intf->dev, "failed to detect interface type\n");
+
+                       if (!ret)
+                               gb_interface_deactivate(intf);
+
+                       return -EIO;
+               }
+       } else {
+               intf->type = type;
+       }
+
+       return ret;
+}
+
+/*
+ * Deactivate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_deactivate(struct gb_interface *intf)
+{
+       if (!intf->active)
+               return;
+
+       trace_gb_interface_deactivate(intf);
+
+       /* Abort any ongoing mode switch. */
+       if (intf->mode_switch)
+               complete(&intf->mode_switch_completion);
+
+       gb_interface_route_destroy(intf);
+       gb_interface_hibernate_link(intf);
+       gb_interface_unipro_set(intf, false);
+       gb_interface_refclk_set(intf, false);
+       gb_interface_vsys_set(intf, false);
+
+       intf->active = false;
+}
+
+/*
+ * Enable an interface by enabling its control connection, fetching the
+ * manifest and other information over it, and finally registering its child
+ * devices.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_enable(struct gb_interface *intf)
+{
+       struct gb_control *control;
+       struct gb_bundle *bundle, *tmp;
+       int ret, size;
+       void *manifest;
+
+       ret = gb_interface_read_and_clear_init_status(intf);
+       if (ret) {
+               dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
+               return ret;
+       }
+
+       /* Establish control connection */
+       control = gb_control_create(intf);
+       if (IS_ERR(control)) {
+               dev_err(&intf->dev, "failed to create control device: %ld\n",
+                               PTR_ERR(control));
+               return PTR_ERR(control);
+       }
+       intf->control = control;
+
+       ret = gb_control_enable(intf->control);
+       if (ret)
+               goto err_put_control;
+
+       /* Get manifest size using control protocol on CPort */
+       size = gb_control_get_manifest_size_operation(intf);
+       if (size <= 0) {
+               dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
+
+               if (size)
+                       ret = size;
+               else
+                       ret = -EINVAL;
+
+               goto err_disable_control;
+       }
+
+       manifest = kmalloc(size, GFP_KERNEL);
+       if (!manifest) {
+               ret = -ENOMEM;
+               goto err_disable_control;
+       }
+
+       /* Get manifest using control protocol on CPort */
+       ret = gb_control_get_manifest_operation(intf, manifest, size);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
+               goto err_free_manifest;
+       }
+
+       /*
+        * Parse the manifest and build up our data structures representing
+        * what's in it.
+        */
+       if (!gb_manifest_parse(intf, manifest, size)) {
+               dev_err(&intf->dev, "failed to parse manifest\n");
+               ret = -EINVAL;
+               goto err_destroy_bundles;
+       }
+
+       ret = gb_control_get_bundle_versions(intf->control);
+       if (ret)
+               goto err_destroy_bundles;
+
+       ret = gb_timesync_interface_add(intf);
+       if (ret) {
+               dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
+               goto err_destroy_bundles;
+       }
+
+       /* Register the control device and any bundles */
+       ret = gb_control_add(intf->control);
+       if (ret)
+               goto err_remove_timesync;
+
+       pm_runtime_use_autosuspend(&intf->dev);
+       pm_runtime_get_noresume(&intf->dev);
+       pm_runtime_set_active(&intf->dev);
+       pm_runtime_enable(&intf->dev);
+
+       list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
+               ret = gb_bundle_add(bundle);
+               if (ret) {
+                       gb_bundle_destroy(bundle);
+                       continue;
+               }
+       }
+
+       kfree(manifest);
+
+       intf->enabled = true;
+
+       pm_runtime_put(&intf->dev);
+
+       trace_gb_interface_enable(intf);
+
+       return 0;
+
+err_remove_timesync:
+       gb_timesync_interface_remove(intf);
+err_destroy_bundles:
+       list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
+               gb_bundle_destroy(bundle);
+err_free_manifest:
+       kfree(manifest);
+err_disable_control:
+       gb_control_disable(intf->control);
+err_put_control:
+       gb_control_put(intf->control);
+       intf->control = NULL;
+
+       return ret;
+}
+
+/*
+ * Disable an interface and destroy its bundles.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_disable(struct gb_interface *intf)
+{
+       struct gb_bundle *bundle;
+       struct gb_bundle *next;
+
+       if (!intf->enabled)
+               return;
+
+       trace_gb_interface_disable(intf);
+
+       pm_runtime_get_sync(&intf->dev);
+
+       /* Set disconnected flag to avoid I/O during connection tear down. */
+       if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
+               intf->disconnected = true;
+
+       list_for_each_entry_safe(bundle, next, &intf->bundles, links)
+               gb_bundle_destroy(bundle);
+
+       if (!intf->mode_switch && !intf->disconnected)
+               gb_control_interface_deactivate_prepare(intf->control);
+
+       gb_control_del(intf->control);
+       gb_timesync_interface_remove(intf);
+       gb_control_disable(intf->control);
+       gb_control_put(intf->control);
+       intf->control = NULL;
+
+       intf->enabled = false;
+
+       pm_runtime_disable(&intf->dev);
+       pm_runtime_set_suspended(&intf->dev);
+       pm_runtime_dont_use_autosuspend(&intf->dev);
+       pm_runtime_put_noidle(&intf->dev);
+}
+
+/* Enable TimeSync on an Interface control connection. */
+int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
+                                u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+       return gb_control_timesync_enable(intf->control, count,
+                                         frame_time, strobe_delay,
+                                         refclk);
+}
+
+/* Disable TimeSync on an Interface control connection. */
+int gb_interface_timesync_disable(struct gb_interface *intf)
+{
+       return gb_control_timesync_disable(intf->control);
+}
+
+/* Transmit the Authoritative FrameTime via an Interface control connection. */
+int gb_interface_timesync_authoritative(struct gb_interface *intf,
+                                       u64 *frame_time)
+{
+       return gb_control_timesync_authoritative(intf->control,
+                                               frame_time);
+}
+
+/* Register an interface. */
+int gb_interface_add(struct gb_interface *intf)
+{
+       int ret;
+
+       ret = device_add(&intf->dev);
+       if (ret) {
+               dev_err(&intf->dev, "failed to register interface: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_interface_add(intf);
+
+       dev_info(&intf->dev, "Interface added (%s)\n",
+                       gb_interface_type_string(intf));
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_GREYBUS:
+               dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
+                               intf->vendor_id, intf->product_id);
+               /* fall-through */
+       case GB_INTERFACE_TYPE_UNIPRO:
+               dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
+                               intf->ddbl1_manufacturer_id,
+                               intf->ddbl1_product_id);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Deregister an interface. */
+void gb_interface_del(struct gb_interface *intf)
+{
+       if (device_is_registered(&intf->dev)) {
+               trace_gb_interface_del(intf);
+
+               device_del(&intf->dev);
+               dev_info(&intf->dev, "Interface removed\n");
+       }
+}
+
+void gb_interface_put(struct gb_interface *intf)
+{
+       put_device(&intf->dev);
+}
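For reference, the activate/enable pairs above are always called with the interface mutex held, as the locking comments note. A minimal sketch of a hypothetical caller, using only functions defined in this file (the helper name example_interface_power_on is illustrative, not part of the patch):

static int example_interface_power_on(struct gb_interface *intf)
{
        int ret;

        mutex_lock(&intf->mutex);

        /* Power the interface up and detect its type (UniPro vs. Greybus). */
        ret = gb_interface_activate(intf);
        if (ret)
                goto out_unlock;

        /* Fetch the manifest and register the control device and bundles. */
        ret = gb_interface_enable(intf);
        if (ret)
                gb_interface_deactivate(intf);

out_unlock:
        mutex_unlock(&intf->mutex);

        return ret;
}

This mirrors the enable branch of power_state_store() earlier in this file.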
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
new file mode 100644 (file)
index 0000000..03299d2
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Greybus Interface Block code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __INTERFACE_H
+#define __INTERFACE_H
+
+enum gb_interface_type {
+       GB_INTERFACE_TYPE_INVALID = 0,
+       GB_INTERFACE_TYPE_UNKNOWN,
+       GB_INTERFACE_TYPE_DUMMY,
+       GB_INTERFACE_TYPE_UNIPRO,
+       GB_INTERFACE_TYPE_GREYBUS,
+};
+
+#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES           BIT(0)
+#define GB_INTERFACE_QUIRK_NO_INIT_STATUS              BIT(1)
+#define GB_INTERFACE_QUIRK_NO_GMP_IDS                  BIT(2)
+#define GB_INTERFACE_QUIRK_FORCED_DISABLE              BIT(3)
+#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH          BIT(4)
+#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE          BIT(5)
+#define GB_INTERFACE_QUIRK_NO_PM                       BIT(6)
+
+struct gb_interface {
+       struct device dev;
+       struct gb_control *control;
+
+       struct list_head bundles;
+       struct list_head module_node;
+       struct list_head manifest_descs;
+       u8 interface_id;        /* Physical location within the Endo */
+       u8 device_id;
+       u8 features;            /* Feature flags set in the manifest */
+
+       enum gb_interface_type type;
+
+       u32 ddbl1_manufacturer_id;
+       u32 ddbl1_product_id;
+       u32 vendor_id;
+       u32 product_id;
+       u64 serial_number;
+
+       struct gb_host_device *hd;
+       struct gb_module *module;
+
+       unsigned long quirks;
+
+       struct mutex mutex;
+
+       bool disconnected;
+
+       bool ejected;
+       bool removed;
+       bool active;
+       bool enabled;
+       bool mode_switch;
+       bool dme_read;
+
+       struct work_struct mode_switch_work;
+       struct completion mode_switch_completion;
+};
+#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
+
+struct gb_interface *gb_interface_create(struct gb_module *module,
+                                        u8 interface_id);
+int gb_interface_activate(struct gb_interface *intf);
+void gb_interface_deactivate(struct gb_interface *intf);
+int gb_interface_enable(struct gb_interface *intf);
+void gb_interface_disable(struct gb_interface *intf);
+int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
+                                u64 frame_time, u32 strobe_delay, u32 refclk);
+int gb_interface_timesync_authoritative(struct gb_interface *intf,
+                                       u64 *frame_time);
+int gb_interface_timesync_disable(struct gb_interface *intf);
+int gb_interface_add(struct gb_interface *intf);
+void gb_interface_del(struct gb_interface *intf);
+void gb_interface_put(struct gb_interface *intf);
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+                                                               u32 mailbox);
+
+int gb_interface_request_mode_switch(struct gb_interface *intf);
+
+#endif /* __INTERFACE_H */
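Interfaces are ordinary reference-counted devices: gb_interface_put() drops the struct device reference, and the final put ends up in gb_interface_release() in interface.c, which frees the structure. A rough sketch of the teardown order a user of this API is expected to follow, again with a hypothetical helper name:

static void example_interface_remove(struct gb_interface *intf)
{
        mutex_lock(&intf->mutex);
        gb_interface_disable(intf);     /* destroy bundles and the control connection */
        gb_interface_deactivate(intf);  /* hibernate the link, cut vsys/refclk/UniPro */
        mutex_unlock(&intf->mutex);

        gb_interface_del(intf);         /* device_del() if it was registered */
        gb_interface_put(intf);         /* last reference frees the interface */
}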
diff --git a/drivers/staging/greybus/kernel_ver.h b/drivers/staging/greybus/kernel_ver.h
new file mode 100644 (file)
index 0000000..97e7ac9
--- /dev/null
@@ -0,0 +1,421 @@
+/*
+ * Greybus kernel "version" glue logic.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ * Backports of newer kernel APIs to allow the code to build properly on older
+ * kernel versions.  Remove this file when merging to upstream; it should not
+ * be needed at all.
+ */
+
+#ifndef __GREYBUS_KERNEL_VER_H
+#define __GREYBUS_KERNEL_VER_H
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+/* Commit: 297d716 power_supply: Change ownership from driver to core */
+#define CORE_OWNS_PSY_STRUCT
+#endif
+
+#ifndef __ATTR_WO
+#define __ATTR_WO(_name) {                                             \
+       .attr   = { .name = __stringify(_name), .mode = S_IWUSR },      \
+       .store  = _name##_store,                                        \
+}
+#endif
+
+#ifndef __ATTR_RW
+#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO),            \
+                               _name##_show, _name##_store)
+#endif
+
+#ifndef DEVICE_ATTR_RO
+#define DEVICE_ATTR_RO(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#endif
+
+#ifndef DEVICE_ATTR_WO
+#define DEVICE_ATTR_WO(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+#endif
+
+#ifndef DEVICE_ATTR_RW
+#define DEVICE_ATTR_RW(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#endif
+
+#ifndef U8_MAX
+#define U8_MAX ((u8)~0U)
+#endif /* ! U8_MAX */
+
+#ifndef U16_MAX
+#define U16_MAX        ((u16)(~0U))
+#endif /* !U16_MAX */
+
+#ifndef U32_MAX
+#define U32_MAX        ((u32)(~0U))
+#endif /* !U32_MAX */
+
+#ifndef U64_MAX
+#define U64_MAX        ((u64)(~0ULL))
+#endif /* !U64_MAX */
+
+/*
+ * The GPIO API sucks rocks in places, like removal, so work around its
+ * requirement that the return value of gpiochip_remove() be checked on
+ * kernels older than 3.17; the 3.17 kernel changed it to return void.
+ * Consistency is overrated.
+ */
+#include <linux/gpio.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+static inline void gb_gpiochip_remove(struct gpio_chip *chip)
+{
+       gpiochip_remove(chip);
+}
+#else
+static inline void gb_gpiochip_remove(struct gpio_chip *chip)
+{
+       int ret;
+
+       ret = gpiochip_remove(chip);
+}
+#endif
+
+/*
+ * ATTRIBUTE_GROUPS showed up in 3.11-rc2, but we need to build on 3.10, so add
+ * it here.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+#include <linux/sysfs.h>
+
+#define ATTRIBUTE_GROUPS(name)                                 \
+static const struct attribute_group name##_group = {           \
+       .attrs = name##_attrs,                                  \
+};                                                             \
+static const struct attribute_group *name##_groups[] = {       \
+       &name##_group,                                          \
+       NULL,                                                   \
+}
+
+static inline int sysfs_create_groups(struct kobject *kobj,
+                                     const struct attribute_group **groups)
+{
+       int error = 0;
+       int i;
+
+       if (!groups)
+               return 0;
+
+       for (i = 0; groups[i]; i++) {
+               error = sysfs_create_group(kobj, groups[i]);
+               if (error) {
+                       while (--i >= 0)
+                               sysfs_remove_group(kobj, groups[i]);
+                       break;
+               }
+       }
+       return error;
+}
+
+static inline void sysfs_remove_groups(struct kobject *kobj,
+                                      const struct attribute_group **groups)
+{
+       int i;
+
+       if (!groups)
+               return;
+       for (i = 0; groups[i]; i++)
+               sysfs_remove_group(kobj, groups[i]);
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+#define MMC_HS400_SUPPORTED
+#define MMC_DDR52_DEFINED
+#endif
+
+#ifndef MMC_CAP2_CORE_RUNTIME_PM
+#define MMC_CAP2_CORE_RUNTIME_PM       0
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#define MMC_POWER_UNDEFINED_SUPPORTED
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+#include <linux/scatterlist.h>
+static inline bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
+{
+       if (!miter->__remaining) {
+               struct scatterlist *sg;
+               unsigned long pgoffset;
+
+               if (!__sg_page_iter_next(&miter->piter))
+                       return false;
+
+               sg = miter->piter.sg;
+               pgoffset = miter->piter.sg_pgoffset;
+
+               miter->__offset = pgoffset ? 0 : sg->offset;
+               miter->__remaining = sg->offset + sg->length -
+                               (pgoffset << PAGE_SHIFT) - miter->__offset;
+               miter->__remaining = min_t(unsigned long, miter->__remaining,
+                                          PAGE_SIZE - miter->__offset);
+       }
+
+       return true;
+}
+
+static inline bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
+{
+       sg_miter_stop(miter);
+
+       while (offset) {
+               off_t consumed;
+
+               if (!sg_miter_get_next_page(miter))
+                       return false;
+
+               consumed = min_t(off_t, offset, miter->__remaining);
+               miter->__offset += consumed;
+               miter->__remaining -= consumed;
+               offset -= consumed;
+       }
+
+       return true;
+}
+
+static inline size_t _sg_copy_buffer(struct scatterlist *sgl,
+                                    unsigned int nents, void *buf,
+                                    size_t buflen, off_t skip,
+                                    bool to_buffer)
+{
+       unsigned int offset = 0;
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+
+       if (to_buffer)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+
+       sg_miter_start(&miter, sgl, nents, sg_flags);
+
+       if (!sg_miter_skip(&miter, skip))
+               return false;
+
+       local_irq_save(flags);
+
+       while (sg_miter_next(&miter) && offset < buflen) {
+               unsigned int len;
+
+               len = min(miter.length, buflen - offset);
+
+               if (to_buffer)
+                       memcpy(buf + offset, miter.addr, len);
+               else
+                       memcpy(miter.addr, buf + offset, len);
+
+               offset += len;
+       }
+
+       sg_miter_stop(&miter);
+
+       local_irq_restore(flags);
+       return offset;
+}
+
+static inline size_t sg_pcopy_to_buffer(struct scatterlist *sgl,
+                                       unsigned int nents, void *buf,
+                                       size_t buflen, off_t skip)
+{
+       return _sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
+}
+
+static inline size_t sg_pcopy_from_buffer(struct scatterlist *sgl,
+                                         unsigned int nents, void *buf,
+                                         size_t buflen, off_t skip)
+{
+       return _sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+#define list_last_entry(ptr, type, member) \
+       list_entry((ptr)->prev, type, member)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+/*
+ * At this point the internal API for setting brightness was changed to the
+ * asynchronous version, and a synchronous API was added to handle cases that
+ * need immediate effect. The LED flash class and the lock for sysfs access
+ * were also introduced.
+ */
+#define LED_HAVE_SET_SYNC
+#define LED_HAVE_FLASH
+#define LED_HAVE_LOCK
+#include <linux/led-class-flash.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+/*
+ * The LED API changed again: the set_sync operation was renamed to
+ * set_blocking and the workqueue is now handled by the core, so only one set
+ * operation is needed.
+ */
+#undef LED_HAVE_SET_SYNC
+#define LED_HAVE_SET_BLOCKING
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+/*
+ * Starting with this version it is possible to disable the LED sysfs entries
+ * in order to hand control of the LED device over to v4l2, which was
+ * implemented later. Before that, this should simply return false.
+ */
+#include <linux/leds.h>
+static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
+{
+       return false;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+/*
+ * New helper functions for registering/unregistering flash led devices as v4l2
+ * subdevices were added.
+ */
+#define V4L2_HAVE_FLASH
+#include <media/v4l2-flash-led-class.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+/*
+ * A power supply obtained by name needs its reference dropped after use.
+ */
+#define PSY_HAVE_PUT
+#endif
+
+/*
+ * General power supply properties that could be absent for various reasons,
+ * like kernel versions or vendor-specific kernel versions.
+ */
+#ifndef POWER_SUPPLY_PROP_VOLTAGE_BOOT
+       #define POWER_SUPPLY_PROP_VOLTAGE_BOOT  -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CURRENT_BOOT
+       #define POWER_SUPPLY_PROP_CURRENT_BOOT  -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CALIBRATE
+       #define POWER_SUPPLY_PROP_CALIBRATE     -1
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+#define SPI_DEV_MODALIAS "spidev"
+#define SPI_NOR_MODALIAS "spi-nor"
+#else
+#define SPI_DEV_MODALIAS "spidev"
+#define SPI_NOR_MODALIAS "m25p80"
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)
+/* Starting from this version, the spi core handles runtime pm automatically */
+#define SPI_CORE_SUPPORT_PM
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+/**
+ * reinit_completion - reinitialize a completion structure
+ * @x:  pointer to completion structure that is to be reinitialized
+ *
+ * This inline function should be used to reinitialize a completion structure
+ * so it can be reused. This is especially important after complete_all() is
+ * used.
+ */
+static inline void reinit_completion(struct completion *x)
+{
+       x->done = 0;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+#include <linux/pwm.h>
+/*
+ * pwm_is_enabled() was first defined in 4.3.
+ * PWMF_ENABLED was first defined in 3.5-rc2, but our code is
+ * always newer than that.
+ */
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+       return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+/**
+ * kstrtobool - convert common user inputs into boolean values
+ * @s: input string
+ * @res: result
+ *
+ * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
+ * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL.  Value
+ * pointed to by res is updated upon finding a match.
+ */
+static inline int kstrtobool(const char *s, bool *res)
+{
+       if (!s)
+               return -EINVAL;
+
+       switch (s[0]) {
+       case 'y':
+       case 'Y':
+       case '1':
+               *res = true;
+               return 0;
+       case 'n':
+       case 'N':
+       case '0':
+               *res = false;
+               return 0;
+       case 'o':
+       case 'O':
+               switch (s[1]) {
+               case 'n':
+               case 'N':
+                       *res = true;
+                       return 0;
+               case 'f':
+               case 'F':
+                       *res = false;
+                       return 0;
+               default:
+                       break;
+               }
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+/*
+ * After commit b2b49ccbdd54 (PM: Kconfig: Set PM_RUNTIME if PM_SLEEP is
+ * selected) PM_RUNTIME is always set if PM is set, so files that are build
+ * conditionally if CONFIG_PM_RUNTIME is set may now be build if CONFIG_PM is
+ * set.
+ */
+
+#ifdef CONFIG_PM
+#define CONFIG_PM_RUNTIME
+#endif /* CONFIG_PM */
+#endif
+
+#endif /* __GREYBUS_KERNEL_VER_H */
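The __ATTR_RW()/DEVICE_ATTR_RW() and kstrtobool() fallbacks above exist so that sysfs handlers written against newer kernels keep building on 3.10-era trees. A minimal sketch of how they are meant to be combined, assuming nothing beyond this header (the example_enable attribute and its handlers are made up for illustration):

static bool example_enabled;

static ssize_t example_enable_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", example_enabled);
}

static ssize_t example_enable_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t len)
{
        bool val;
        int ret;

        /* Accepts "y"/"n", "1"/"0" and "on"/"off" style input. */
        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;

        example_enabled = val;
        return len;
}
static DEVICE_ATTR_RW(example_enable);

On kernels that already provide these helpers, the fallback definitions above compile away behind the #ifndef and version guards.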
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
new file mode 100644 (file)
index 0000000..71db077
--- /dev/null
@@ -0,0 +1,1416 @@
+/*
+ * Greybus Lights protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+#define NAMES_MAX      32
+
+struct gb_channel {
+       u8                              id;
+       u32                             flags;
+       u32                             color;
+       char                            *color_name;
+       u8                              fade_in;
+       u8                              fade_out;
+       u32                             mode;
+       char                            *mode_name;
+       struct attribute                **attrs;
+       struct attribute_group          *attr_group;
+       const struct attribute_group    **attr_groups;
+#ifndef LED_HAVE_SET_BLOCKING
+       struct work_struct              work_brightness_set;
+#endif
+       struct led_classdev             *led;
+#ifdef LED_HAVE_FLASH
+       struct led_classdev_flash       fled;
+       struct led_flash_setting        intensity_uA;
+       struct led_flash_setting        timeout_us;
+#else
+       struct led_classdev             cled;
+#endif
+       struct gb_light                 *light;
+       bool                            is_registered;
+       bool                            releasing;
+       bool                            strobe_state;
+       bool                            active;
+       struct mutex                    lock;
+};
+
+struct gb_light {
+       u8                      id;
+       char                    *name;
+       struct gb_lights        *glights;
+       u32                     flags;
+       u8                      channels_count;
+       struct gb_channel       *channels;
+       bool                    has_flash;
+       bool                    ready;
+#ifdef V4L2_HAVE_FLASH
+       struct v4l2_flash       *v4l2_flash;
+#endif
+};
+
+struct gb_lights {
+       struct gb_connection    *connection;
+       u8                      lights_count;
+       struct gb_light         *lights;
+       struct mutex            lights_lock;
+};
+
+static void gb_lights_channel_free(struct gb_channel *channel);
+
+static struct gb_connection *get_conn_from_channel(struct gb_channel *channel)
+{
+       return channel->light->glights->connection;
+}
+
+static struct gb_connection *get_conn_from_light(struct gb_light *light)
+{
+       return light->glights->connection;
+}
+
+static bool is_channel_flash(struct gb_channel *channel)
+{
+       return !!(channel->mode & (GB_CHANNEL_MODE_FLASH | GB_CHANNEL_MODE_TORCH
+                                  | GB_CHANNEL_MODE_INDICATOR));
+}
+
+#ifdef LED_HAVE_FLASH
+static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+
+       return container_of(fled_cdev, struct gb_channel, fled);
+}
+
+static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+{
+       return &channel->fled.led_cdev;
+}
+
+static struct gb_channel *get_channel_from_mode(struct gb_light *light,
+                                               u32 mode)
+{
+       struct gb_channel *channel = NULL;
+       int i;
+
+       for (i = 0; i < light->channels_count; i++) {
+               channel = &light->channels[i];
+               if (channel && channel->mode == mode)
+                       break;
+       }
+       return channel;
+}
+
+static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
+                                          u32 intensity)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_set_flash_intensity_request req;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.intensity_uA = cpu_to_le32(intensity);
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_INTENSITY,
+                               &req, sizeof(req), NULL, 0);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+{
+       u32 intensity;
+
+       /* If the channel is flash we need to get the attached torch channel */
+       if (channel->mode & GB_CHANNEL_MODE_FLASH)
+               channel = get_channel_from_mode(channel->light,
+                                               GB_CHANNEL_MODE_TORCH);
+
+       /* For non-flash modes we need to convert brightness to intensity */
+       intensity = channel->intensity_uA.min +
+                       (channel->intensity_uA.step * channel->led->brightness);
+
+       return __gb_lights_flash_intensity_set(channel, intensity);
+}
+#else /* LED_HAVE_FLASH */
+static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
+{
+       return container_of(cdev, struct gb_channel, cled);
+}
+
+static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+{
+       return &channel->cled;
+}
+
+static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+{
+       return 0;
+}
+#endif /* !LED_HAVE_FLASH */
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color);
+static int gb_lights_fade_set(struct gb_channel *channel);
+
+#ifdef LED_HAVE_LOCK
+static void led_lock(struct led_classdev *cdev)
+{
+       mutex_lock(&cdev->led_access);
+}
+
+static void led_unlock(struct led_classdev *cdev)
+{
+       mutex_unlock(&cdev->led_access);
+}
+#else
+static void led_lock(struct led_classdev *cdev)
+{
+}
+
+static void led_unlock(struct led_classdev *cdev)
+{
+}
+#endif /* !LED_HAVE_LOCK */
+
+#define gb_lights_fade_attr(__dir)                                     \
+static ssize_t fade_##__dir##_show(struct device *dev,                 \
+                                  struct device_attribute *attr,       \
+                                  char *buf)                           \
+{                                                                      \
+       struct led_classdev *cdev = dev_get_drvdata(dev);               \
+       struct gb_channel *channel = get_channel_from_cdev(cdev);       \
+                                                                       \
+       return sprintf(buf, "%u\n", channel->fade_##__dir);             \
+}                                                                      \
+                                                                       \
+static ssize_t fade_##__dir##_store(struct device *dev,                        \
+                                   struct device_attribute *attr,      \
+                                   const char *buf, size_t size)       \
+{                                                                      \
+       struct led_classdev *cdev = dev_get_drvdata(dev);               \
+       struct gb_channel *channel = get_channel_from_cdev(cdev);       \
+       u8 fade;                                                        \
+       int ret;                                                        \
+                                                                       \
+       led_lock(cdev);                                                 \
+       if (led_sysfs_is_disabled(cdev)) {                              \
+               ret = -EBUSY;                                           \
+               goto unlock;                                            \
+       }                                                               \
+                                                                       \
+       ret = kstrtou8(buf, 0, &fade);                                  \
+       if (ret < 0) {                                                  \
+               dev_err(dev, "could not parse fade value %d\n", ret);   \
+               goto unlock;                                            \
+       }                                                               \
+       if (channel->fade_##__dir == fade)                              \
+               goto unlock;                                            \
+       channel->fade_##__dir = fade;                                   \
+                                                                       \
+       ret = gb_lights_fade_set(channel);                              \
+       if (ret < 0)                                                    \
+               goto unlock;                                            \
+                                                                       \
+       ret = size;                                                     \
+unlock:                                                                        \
+       led_unlock(cdev);                                               \
+       return ret;                                                     \
+}                                                                      \
+static DEVICE_ATTR_RW(fade_##__dir)
+
+gb_lights_fade_attr(in);
+gb_lights_fade_attr(out);
+
+static ssize_t color_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct led_classdev *cdev = dev_get_drvdata(dev);
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+       return sprintf(buf, "0x%08x\n", channel->color);
+}
+
+static ssize_t color_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       struct led_classdev *cdev = dev_get_drvdata(dev);
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+       u32 color;
+       int ret;
+
+       led_lock(cdev);
+       if (led_sysfs_is_disabled(cdev)) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+       ret = kstrtou32(buf, 0, &color);
+       if (ret < 0) {
+               dev_err(dev, "could not parse color value %d\n", ret);
+               goto unlock;
+       }
+
+       ret = gb_lights_color_set(channel, color);
+       if (ret < 0)
+               goto unlock;
+
+       channel->color = color;
+       ret = size;
+unlock:
+       led_unlock(cdev);
+       return ret;
+}
+static DEVICE_ATTR_RW(color);
+
+static int channel_attr_groups_set(struct gb_channel *channel,
+                                  struct led_classdev *cdev)
+{
+       int attr = 0;
+       int size = 0;
+
+       if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+               size++;
+       if (channel->flags & GB_LIGHT_CHANNEL_FADER)
+               size += 2;
+
+       if (!size)
+               return 0;
+
+       /* Set attributes based on the channel flags */
+       channel->attrs = kcalloc(size + 1, sizeof(**channel->attrs),
+                                GFP_KERNEL);
+       if (!channel->attrs)
+               return -ENOMEM;
+       channel->attr_group = kcalloc(1, sizeof(*channel->attr_group),
+                                     GFP_KERNEL);
+       if (!channel->attr_group)
+               return -ENOMEM;
+       channel->attr_groups = kcalloc(2, sizeof(*channel->attr_groups),
+                                      GFP_KERNEL);
+       if (!channel->attr_groups)
+               return -ENOMEM;
+
+       if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+               channel->attrs[attr++] = &dev_attr_color.attr;
+       if (channel->flags & GB_LIGHT_CHANNEL_FADER) {
+               channel->attrs[attr++] = &dev_attr_fade_in.attr;
+               channel->attrs[attr++] = &dev_attr_fade_out.attr;
+       }
+
+       channel->attr_group->attrs = channel->attrs;
+
+       channel->attr_groups[0] = channel->attr_group;
+
+       cdev->groups = channel->attr_groups;
+
+       return 0;
+}
+
+static int gb_lights_fade_set(struct gb_channel *channel)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_set_fade_request req;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.fade_in = channel->fade_in;
+       req.fade_out = channel->fade_out;
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FADE,
+                               &req, sizeof(req), NULL, 0);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_set_color_request req;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.color = cpu_to_le32(color);
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_COLOR,
+                               &req, sizeof(req), NULL, 0);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int __gb_lights_led_brightness_set(struct gb_channel *channel)
+{
+       struct gb_lights_set_brightness_request req;
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       bool old_active;
+       int ret;
+
+       mutex_lock(&channel->lock);
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               goto out_unlock;
+
+       old_active = channel->active;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.brightness = (u8)channel->led->brightness;
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
+                               &req, sizeof(req), NULL, 0);
+       if (ret < 0)
+               goto out_pm_put;
+
+       if (channel->led->brightness)
+               channel->active = true;
+       else
+               channel->active = false;
+
+       /* we need to keep the module alive when going to the active state */
+       if (!old_active && channel->active)
+               goto out_unlock;
+
+       /*
+        * on the other hand, when going inactive we still hold a reference and
+        * need to put it so that the module can go to suspend.
+        */
+       if (old_active && !channel->active)
+               gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+       gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+       mutex_unlock(&channel->lock);
+
+       return ret;
+}
+
+static int __gb_lights_brightness_set(struct gb_channel *channel)
+{
+       int ret;
+
+       if (channel->releasing)
+               return 0;
+
+       if (is_channel_flash(channel))
+               ret = __gb_lights_flash_brightness_set(channel);
+       else
+               ret = __gb_lights_led_brightness_set(channel);
+
+       return ret;
+}
+
+#ifndef LED_HAVE_SET_BLOCKING
+static void gb_brightness_set_work(struct work_struct *work)
+{
+       struct gb_channel *channel = container_of(work, struct gb_channel,
+                                                 work_brightness_set);
+
+       __gb_lights_brightness_set(channel);
+}
+
+#ifdef LED_HAVE_SET_SYNC
+static int gb_brightness_set_sync(struct led_classdev *cdev,
+                                 enum led_brightness value)
+{
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+       channel->led->brightness = value;
+
+       return __gb_lights_brightness_set(channel);
+}
+#endif
+
+static void gb_brightness_set(struct led_classdev *cdev,
+                             enum led_brightness value)
+{
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+       if (channel->releasing)
+               return;
+
+       cdev->brightness = value;
+       schedule_work(&channel->work_brightness_set);
+}
+#else /* LED_HAVE_SET_BLOCKING */
+static int gb_brightness_set(struct led_classdev *cdev,
+                            enum led_brightness value)
+{
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+       channel->led->brightness = value;
+
+       return __gb_lights_brightness_set(channel);
+}
+#endif
+
+static enum led_brightness gb_brightness_get(struct led_classdev *cdev)
+
+{
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+       return channel->led->brightness;
+}
+
+static int gb_blink_set(struct led_classdev *cdev, unsigned long *delay_on,
+                       unsigned long *delay_off)
+{
+       struct gb_channel *channel = get_channel_from_cdev(cdev);
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_blink_request req;
+       bool old_active;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       mutex_lock(&channel->lock);
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               goto out_unlock;
+
+       old_active = channel->active;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.time_on_ms = cpu_to_le16(*delay_on);
+       req.time_off_ms = cpu_to_le16(*delay_off);
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BLINK, &req,
+                               sizeof(req), NULL, 0);
+       if (ret < 0)
+               goto out_pm_put;
+
+       if (delay_on)
+               channel->active = true;
+       else
+               channel->active = false;
+
+       /* we need to keep the module alive when going to the active state */
+       if (!old_active && channel->active)
+               goto out_unlock;
+
+       /*
+        * on the other hand, when going inactive we still hold a reference and
+        * need to put it so that the module can go to suspend.
+        */
+       if (old_active && !channel->active)
+               gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+       gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+       mutex_unlock(&channel->lock);
+
+       return ret;
+}
+
+static void gb_lights_led_operations_set(struct gb_channel *channel,
+                                        struct led_classdev *cdev)
+{
+       cdev->brightness_get = gb_brightness_get;
+#ifdef LED_HAVE_SET_SYNC
+       cdev->brightness_set_sync = gb_brightness_set_sync;
+#endif
+#ifdef LED_HAVE_SET_BLOCKING
+       cdev->brightness_set_blocking = gb_brightness_set;
+#endif
+#ifndef LED_HAVE_SET_BLOCKING
+       cdev->brightness_set = gb_brightness_set;
+       INIT_WORK(&channel->work_brightness_set, gb_brightness_set_work);
+#endif
+
+       if (channel->flags & GB_LIGHT_CHANNEL_BLINK)
+               cdev->blink_set = gb_blink_set;
+}
+
+#ifdef V4L2_HAVE_FLASH
+/* V4L2 specific helpers */
+static const struct v4l2_flash_ops v4l2_flash_ops;
+
+static void __gb_lights_channel_v4l2_config(struct led_flash_setting *channel_s,
+                                           struct led_flash_setting *v4l2_s)
+{
+       v4l2_s->min = channel_s->min;
+       v4l2_s->max = channel_s->max;
+       v4l2_s->step = channel_s->step;
+       /* For v4l2 val is the default value */
+       v4l2_s->val = channel_s->max;
+}
+
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+       struct gb_connection *connection = get_conn_from_light(light);
+       struct device *dev = &connection->bundle->dev;
+       struct v4l2_flash_config *sd_cfg;
+       struct led_classdev_flash *fled;
+       struct led_classdev_flash *iled = NULL;
+       struct gb_channel *channel_torch, *channel_ind, *channel_flash;
+       int ret = 0;
+
+       sd_cfg = kcalloc(1, sizeof(*sd_cfg), GFP_KERNEL);
+       if (!sd_cfg)
+               return -ENOMEM;
+
+       channel_torch = get_channel_from_mode(light, GB_CHANNEL_MODE_TORCH);
+       if (channel_torch)
+               __gb_lights_channel_v4l2_config(&channel_torch->intensity_uA,
+                                               &sd_cfg->torch_intensity);
+
+       channel_ind = get_channel_from_mode(light, GB_CHANNEL_MODE_INDICATOR);
+       if (channel_ind) {
+               __gb_lights_channel_v4l2_config(&channel_ind->intensity_uA,
+                                               &sd_cfg->indicator_intensity);
+               iled = &channel_ind->fled;
+       }
+
+       channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+       WARN_ON(!channel_flash);
+
+       fled = &channel_flash->fled;
+
+       snprintf(sd_cfg->dev_name, sizeof(sd_cfg->dev_name), "%s", light->name);
+
+       /* Set the possible fault values; in our case, all of them */
+       sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT |
+               LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT |
+               LED_FAULT_OVER_CURRENT | LED_FAULT_INDICATOR |
+               LED_FAULT_UNDER_VOLTAGE | LED_FAULT_INPUT_VOLTAGE |
+               LED_FAULT_LED_OVER_TEMPERATURE;
+
+       light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, iled,
+                                           &v4l2_flash_ops, sd_cfg);
+       if (IS_ERR_OR_NULL(light->v4l2_flash)) {
+               ret = PTR_ERR(light->v4l2_flash);
+               goto out_free;
+       }
+
+       return ret;
+
+out_free:
+       kfree(sd_cfg);
+       return ret;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+       v4l2_flash_release(light->v4l2_flash);
+}
+#else
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+       struct gb_connection *connection = get_conn_from_light(light);
+
+       dev_err(&connection->bundle->dev, "no support for v4l2 subdevices\n");
+       return 0;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+}
+#endif
+
+#ifdef LED_HAVE_FLASH
+/* Flash specific operations */
+static int gb_lights_flash_intensity_set(struct led_classdev_flash *fcdev,
+                                        u32 brightness)
+{
+       struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+                                                 fled);
+       int ret;
+
+       ret = __gb_lights_flash_intensity_set(channel, brightness);
+       if (ret < 0)
+               return ret;
+
+       fcdev->brightness.val = brightness;
+
+       return 0;
+}
+
+static int gb_lights_flash_intensity_get(struct led_classdev_flash *fcdev,
+                                        u32 *brightness)
+{
+       *brightness = fcdev->brightness.val;
+
+       return 0;
+}
+
+static int gb_lights_flash_strobe_set(struct led_classdev_flash *fcdev,
+                                     bool state)
+{
+       struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+                                                 fled);
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_set_flash_strobe_request req;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.state = state ? 1 : 0;
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_STROBE,
+                               &req, sizeof(req), NULL, 0);
+       if (!ret)
+               channel->strobe_state = state;
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int gb_lights_flash_strobe_get(struct led_classdev_flash *fcdev,
+                                     bool *state)
+{
+       struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+                                                 fled);
+
+       *state = channel->strobe_state;
+       return 0;
+}
+
+static int gb_lights_flash_timeout_set(struct led_classdev_flash *fcdev,
+                                      u32 timeout)
+{
+       struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+                                                 fled);
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_set_flash_timeout_request req;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+       req.timeout_us = cpu_to_le32(timeout);
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT,
+                               &req, sizeof(req), NULL, 0);
+       if (!ret)
+               fcdev->timeout.val = timeout;
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static int gb_lights_flash_fault_get(struct led_classdev_flash *fcdev,
+                                    u32 *fault)
+{
+       struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+                                                 fled);
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_bundle *bundle = connection->bundle;
+       struct gb_lights_get_flash_fault_request req;
+       struct gb_lights_get_flash_fault_response resp;
+       int ret;
+
+       if (channel->releasing)
+               return -ESHUTDOWN;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret < 0)
+               return ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_FLASH_FAULT,
+                               &req, sizeof(req), &resp, sizeof(resp));
+       if (!ret)
+               *fault = le32_to_cpu(resp.fault);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
+static const struct led_flash_ops gb_lights_flash_ops = {
+       .flash_brightness_set   = gb_lights_flash_intensity_set,
+       .flash_brightness_get   = gb_lights_flash_intensity_get,
+       .strobe_set             = gb_lights_flash_strobe_set,
+       .strobe_get             = gb_lights_flash_strobe_get,
+       .timeout_set            = gb_lights_flash_timeout_set,
+       .fault_get              = gb_lights_flash_fault_get,
+};
+
+static int __gb_lights_channel_torch_attach(struct gb_channel *channel,
+                                           struct gb_channel *channel_torch)
+{
+       char *name;
+
+       /* we can only attach torch to a flash channel */
+       if (!(channel->mode & GB_CHANNEL_MODE_FLASH))
+               return 0;
+
+       /* Move torch brightness to the destination */
+       channel->led->max_brightness = channel_torch->led->max_brightness;
+
+       /* append mode name to flash name */
+       name = kasprintf(GFP_KERNEL, "%s_%s", channel->led->name,
+                        channel_torch->mode_name);
+       if (!name)
+               return -ENOMEM;
+       kfree(channel->led->name);
+       channel->led->name = name;
+
+       channel_torch->led = channel->led;
+
+       return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct led_classdev_flash *fled = &channel->fled;
+       struct led_flash_setting *fset;
+       struct gb_channel *channel_torch;
+       int ret;
+
+       fled->ops = &gb_lights_flash_ops;
+
+       fled->led_cdev.flags |= LED_DEV_CAP_FLASH;
+
+       fset = &fled->brightness;
+       fset->min = channel->intensity_uA.min;
+       fset->max = channel->intensity_uA.max;
+       fset->step = channel->intensity_uA.step;
+       fset->val = channel->intensity_uA.max;
+
+       /* Only the flash mode has the timeout constraint settings */
+       if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+               fset = &fled->timeout;
+               fset->min = channel->timeout_us.min;
+               fset->max = channel->timeout_us.max;
+               fset->step = channel->timeout_us.step;
+               fset->val = channel->timeout_us.max;
+       }
+
+       /*
+        * If the light has a torch mode channel, that channel will be the
+        * led classdev of the flash classdev registered above.
+        */
+       channel_torch = get_channel_from_mode(channel->light,
+                                             GB_CHANNEL_MODE_TORCH);
+       if (channel_torch) {
+               ret = __gb_lights_channel_torch_attach(channel, channel_torch);
+               if (ret < 0)
+                       goto fail;
+       }
+
+       ret = led_classdev_flash_register(&connection->bundle->dev, fled);
+       if (ret < 0)
+               goto fail;
+
+       channel->is_registered = true;
+       return 0;
+fail:
+       channel->led = NULL;
+       return ret;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+       if (!channel->is_registered)
+               return;
+
+       led_classdev_flash_unregister(&channel->fled);
+}
+
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct gb_lights_get_channel_flash_config_request req;
+       struct gb_lights_get_channel_flash_config_response conf;
+       struct led_flash_setting *fset;
+       int ret;
+
+       req.light_id = channel->light->id;
+       req.channel_id = channel->id;
+
+       ret = gb_operation_sync(connection,
+                               GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG,
+                               &req, sizeof(req), &conf, sizeof(conf));
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Intensity constraints for the flash-related modes: flash, torch and
+        * indicator. They will be needed for v4l2 registration.
+        */
+       fset = &channel->intensity_uA;
+       fset->min = le32_to_cpu(conf.intensity_min_uA);
+       fset->max = le32_to_cpu(conf.intensity_max_uA);
+       fset->step = le32_to_cpu(conf.intensity_step_uA);
+
+       /*
+        * For flash-type channels, max brightness is set to the number of
+        * intensity steps available.
+        */
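+       /*
+        * Illustrative example (hypothetical values): a 0-500000 uA range
+        * with a 10000 uA step yields max_brightness = 50.
+        */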
+       channel->led->max_brightness = (fset->max - fset->min) / fset->step;
+
+       /* Only the flash mode has the timeout constraint settings */
+       if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+               fset = &channel->timeout_us;
+               fset->min = le32_to_cpu(conf.timeout_min_us);
+               fset->max = le32_to_cpu(conf.timeout_max_us);
+               fset->step = le32_to_cpu(conf.timeout_step_us);
+       }
+
+       return 0;
+}
+#else
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+
+       dev_err(&connection->bundle->dev, "no support for flash devices\n");
+       return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+       return 0;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+}
+
+#endif /* LED_HAVE_FLASH */
+
+static int __gb_lights_led_register(struct gb_channel *channel)
+{
+       struct gb_connection *connection = get_conn_from_channel(channel);
+       struct led_classdev *cdev = get_channel_cdev(channel);
+       int ret;
+
+       ret = led_classdev_register(&connection->bundle->dev, cdev);
+       if (ret < 0)
+               channel->led = NULL;
+       else
+               channel->is_registered = true;
+       return ret;
+}
+
+static int gb_lights_channel_register(struct gb_channel *channel)
+{
+       /* Normal LED channel: just register the led classdev and we are done */
+       if (!is_channel_flash(channel))
+               return __gb_lights_led_register(channel);
+
+       /*
+        * Flash type needs more work: register a flash classdev (the
+        * indicator is also a flash classdev), while the torch channel
+        * becomes the led classdev of that flash classdev.
+        */
+       if (!(channel->mode & GB_CHANNEL_MODE_TORCH))
+               return __gb_lights_flash_led_register(channel);
+
+       return 0;
+}
+
+static void __gb_lights_led_unregister(struct gb_channel *channel)
+{
+       struct led_classdev *cdev = get_channel_cdev(channel);
+
+       if (!channel->is_registered)
+               return;
+
+       led_classdev_unregister(cdev);
+       channel->led = NULL;
+}
+
+static void gb_lights_channel_unregister(struct gb_channel *channel)
+{
+       /* As with registration, flash channels are handled differently */
+       if (!is_channel_flash(channel)) {
+               __gb_lights_led_unregister(channel);
+               return;
+       }
+
+       if (channel->mode & GB_CHANNEL_MODE_TORCH)
+               __gb_lights_led_unregister(channel);
+       else
+               __gb_lights_flash_led_unregister(channel);
+}
+
+static int gb_lights_channel_config(struct gb_light *light,
+                                   struct gb_channel *channel)
+{
+       struct gb_lights_get_channel_config_response conf;
+       struct gb_lights_get_channel_config_request req;
+       struct gb_connection *connection = get_conn_from_light(light);
+       struct led_classdev *cdev = get_channel_cdev(channel);
+       char *name;
+       int ret;
+
+       req.light_id = light->id;
+       req.channel_id = channel->id;
+
+       ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG,
+                               &req, sizeof(req), &conf, sizeof(conf));
+       if (ret < 0)
+               return ret;
+
+       channel->light = light;
+       channel->mode = le32_to_cpu(conf.mode);
+       channel->flags = le32_to_cpu(conf.flags);
+       channel->color = le32_to_cpu(conf.color);
+       channel->color_name = kstrndup(conf.color_name, NAMES_MAX, GFP_KERNEL);
+       if (!channel->color_name)
+               return -ENOMEM;
+       channel->mode_name = kstrndup(conf.mode_name, NAMES_MAX, GFP_KERNEL);
+       if (!channel->mode_name)
+               return -ENOMEM;
+
+       channel->led = cdev;
+
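+       /*
+        * The classdev name is "<light>:<color>:<mode>", e.g. an illustrative
+        * "frontlight:white:torch".
+        */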
+       name = kasprintf(GFP_KERNEL, "%s:%s:%s", light->name,
+                        channel->color_name, channel->mode_name);
+       if (!name)
+               return -ENOMEM;
+
+       cdev->name = name;
+
+       cdev->max_brightness = conf.max_brightness;
+
+       ret = channel_attr_groups_set(channel, cdev);
+       if (ret < 0)
+               return ret;
+
+       gb_lights_led_operations_set(channel, cdev);
+
+       /*
+        * If this is not a flash-related channel (flash, torch or indicator)
+        * we are done here. Otherwise, continue and fetch the flash-related
+        * configuration.
+        */
+       if (!is_channel_flash(channel))
+               return ret;
+
+       light->has_flash = true;
+
+       ret = gb_lights_channel_flash_config(channel);
+       if (ret < 0)
+               return ret;
+
+       return ret;
+}
+
+static int gb_lights_light_config(struct gb_lights *glights, u8 id)
+{
+       struct gb_light *light = &glights->lights[id];
+       struct gb_lights_get_light_config_request req;
+       struct gb_lights_get_light_config_response conf;
+       int ret;
+       int i;
+
+       light->glights = glights;
+       light->id = id;
+
+       req.id = id;
+
+       ret = gb_operation_sync(glights->connection,
+                               GB_LIGHTS_TYPE_GET_LIGHT_CONFIG,
+                               &req, sizeof(req), &conf, sizeof(conf));
+       if (ret < 0)
+               return ret;
+
+       if (!conf.channel_count)
+               return -EINVAL;
+       if (!strlen(conf.name))
+               return -EINVAL;
+
+       light->channels_count = conf.channel_count;
+       light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
+
+       light->channels = kzalloc(light->channels_count *
+                                 sizeof(struct gb_channel), GFP_KERNEL);
+       if (!light->channels)
+               return -ENOMEM;
+
+       /* First we collect all the configurations for all channels */
+       for (i = 0; i < light->channels_count; i++) {
+               light->channels[i].id = i;
+               ret = gb_lights_channel_config(light, &light->channels[i]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int gb_lights_light_register(struct gb_light *light)
+{
+       int ret;
+       int i;
+
+       /*
+        * Now that all configurations were fetched successfully, register the
+        * led classdev and flash classdev, and the v4l2 flash subdevice if a
+        * flash device was found.
+        */
+       for (i = 0; i < light->channels_count; i++) {
+               ret = gb_lights_channel_register(&light->channels[i]);
+               if (ret < 0)
+                       return ret;
+
+               mutex_init(&light->channels[i].lock);
+       }
+
+       light->ready = true;
+
+       if (light->has_flash) {
+               ret = gb_lights_light_v4l2_register(light);
+               if (ret < 0) {
+                       light->has_flash = false;
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void gb_lights_channel_free(struct gb_channel *channel)
+{
+#ifndef LED_HAVE_SET_BLOCKING
+       flush_work(&channel->work_brightness_set);
+#endif
+       kfree(channel->attrs);
+       kfree(channel->attr_group);
+       kfree(channel->attr_groups);
+       kfree(channel->color_name);
+       kfree(channel->mode_name);
+       mutex_destroy(&channel->lock);
+}
+
+static void gb_lights_channel_release(struct gb_channel *channel)
+{
+       channel->releasing = true;
+
+       gb_lights_channel_unregister(channel);
+
+       gb_lights_channel_free(channel);
+}
+
+static void gb_lights_light_release(struct gb_light *light)
+{
+       int i;
+       int count;
+
+       light->ready = false;
+
+       count = light->channels_count;
+
+       if (light->has_flash)
+               gb_lights_light_v4l2_unregister(light);
+
+       for (i = 0; i < count; i++) {
+               gb_lights_channel_release(&light->channels[i]);
+               light->channels_count--;
+       }
+       kfree(light->channels);
+       kfree(light->name);
+}
+
+static void gb_lights_release(struct gb_lights *glights)
+{
+       int i;
+
+       if (!glights)
+               return;
+
+       mutex_lock(&glights->lights_lock);
+       if (!glights->lights)
+               goto free_glights;
+
+       for (i = 0; i < glights->lights_count; i++)
+               gb_lights_light_release(&glights->lights[i]);
+
+       kfree(glights->lights);
+
+free_glights:
+       mutex_unlock(&glights->lights_lock);
+       mutex_destroy(&glights->lights_lock);
+       kfree(glights);
+}
+
+static int gb_lights_get_count(struct gb_lights *glights)
+{
+       struct gb_lights_get_lights_response resp;
+       int ret;
+
+       ret = gb_operation_sync(glights->connection, GB_LIGHTS_TYPE_GET_LIGHTS,
+                               NULL, 0, &resp, sizeof(resp));
+       if (ret < 0)
+               return ret;
+
+       if (!resp.lights_count)
+               return -EINVAL;
+
+       glights->lights_count = resp.lights_count;
+
+       return 0;
+}
+
+static int gb_lights_create_all(struct gb_lights *glights)
+{
+       struct gb_connection *connection = glights->connection;
+       int ret;
+       int i;
+
+       mutex_lock(&glights->lights_lock);
+       ret = gb_lights_get_count(glights);
+       if (ret < 0)
+               goto out;
+
+       glights->lights = kzalloc(glights->lights_count *
+                                 sizeof(struct gb_light), GFP_KERNEL);
+       if (!glights->lights) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < glights->lights_count; i++) {
+               ret = gb_lights_light_config(glights, i);
+               if (ret < 0) {
+                       dev_err(&connection->bundle->dev,
+                               "Failed to configure lights device\n");
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&glights->lights_lock);
+       return ret;
+}
+
+static int gb_lights_register_all(struct gb_lights *glights)
+{
+       struct gb_connection *connection = glights->connection;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&glights->lights_lock);
+       for (i = 0; i < glights->lights_count; i++) {
+               ret = gb_lights_light_register(&glights->lights[i]);
+               if (ret < 0) {
+                       dev_err(&connection->bundle->dev,
+                               "Failed to enable lights device\n");
+                       break;
+               }
+       }
+
+       mutex_unlock(&glights->lights_lock);
+       return ret;
+}
+
+static int gb_lights_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct device *dev = &connection->bundle->dev;
+       struct gb_lights *glights = gb_connection_get_data(connection);
+       struct gb_light *light;
+       struct gb_message *request;
+       struct gb_lights_event_request *payload;
+       int ret = 0;
+       u8 light_id;
+       u8 event;
+
+       if (op->type != GB_LIGHTS_TYPE_EVENT) {
+               dev_err(dev, "Unsupported unsolicited event: %u\n", op->type);
+               return -EINVAL;
+       }
+
+       request = op->request;
+
+       if (request->payload_size < sizeof(*payload)) {
+               dev_err(dev, "Wrong event size received (%zu < %zu)\n",
+                       request->payload_size, sizeof(*payload));
+               return -EINVAL;
+       }
+
+       payload = request->payload;
+       light_id = payload->light_id;
+
+       if (light_id >= glights->lights_count ||
+           !glights->lights[light_id].ready) {
+               dev_err(dev, "Event received for unconfigured light id: %d\n",
+                       light_id);
+               return -EINVAL;
+       }
+
+       event = payload->event;
+
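+       /*
+        * A config event means the light changed on the module side: drop the
+        * current representation and rebuild it from the new configuration.
+        */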
+       if (event & GB_LIGHTS_LIGHT_CONFIG) {
+               light = &glights->lights[light_id];
+
+               mutex_lock(&glights->lights_lock);
+               gb_lights_light_release(light);
+               ret = gb_lights_light_config(glights, light_id);
+               if (!ret)
+                       ret = gb_lights_light_register(light);
+               if (ret < 0)
+                       gb_lights_light_release(light);
+               mutex_unlock(&glights->lights_lock);
+       }
+
+       return ret;
+}
+
+static int gb_lights_probe(struct gb_bundle *bundle,
+                          const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_lights *glights;
+       int ret;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LIGHTS)
+               return -ENODEV;
+
+       glights = kzalloc(sizeof(*glights), GFP_KERNEL);
+       if (!glights)
+               return -ENOMEM;
+
+       mutex_init(&glights->lights_lock);
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                         gb_lights_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto out;
+       }
+
+       glights->connection = connection;
+       gb_connection_set_data(connection, glights);
+
+       greybus_set_drvdata(bundle, glights);
+
+       /* We aren't ready to receive an incoming request yet */
+       ret = gb_connection_enable_tx(connection);
+       if (ret)
+               goto error_connection_destroy;
+
+       /*
+        * Set up all the lights devices over this connection; if anything
+        * goes wrong, tear down all lights.
+        */
+       ret = gb_lights_create_all(glights);
+       if (ret < 0)
+               goto error_connection_disable;
+
+       /* We are ready to receive an incoming request now, enable RX as well */
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto error_connection_disable;
+
+       /* Enable & register lights */
+       ret = gb_lights_register_all(glights);
+       if (ret < 0)
+               goto error_connection_disable;
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+error_connection_disable:
+       gb_connection_disable(connection);
+error_connection_destroy:
+       gb_connection_destroy(connection);
+out:
+       gb_lights_release(glights);
+       return ret;
+}
+
+static void gb_lights_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_lights *glights = greybus_get_drvdata(bundle);
+
+       if (gb_pm_runtime_get_sync(bundle))
+               gb_pm_runtime_get_noresume(bundle);
+
+       gb_connection_disable(glights->connection);
+       gb_connection_destroy(glights->connection);
+
+       gb_lights_release(glights);
+}
+
+static const struct greybus_bundle_id gb_lights_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LIGHTS) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_lights_id_table);
+
+static struct greybus_driver gb_lights_driver = {
+       .name           = "lights",
+       .probe          = gb_lights_probe,
+       .disconnect     = gb_lights_disconnect,
+       .id_table       = gb_lights_id_table,
+};
+module_greybus_driver(gb_lights_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
new file mode 100644 (file)
index 0000000..70dd9e5
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Greybus driver for the log protocol
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+
+#include "greybus.h"
+
+struct gb_log {
+       struct gb_connection *connection;
+};
+
+static int gb_log_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct device *dev = &connection->bundle->dev;
+       struct gb_log_send_log_request *receive;
+       u16 len;
+
+       if (op->type != GB_LOG_TYPE_SEND_LOG) {
+               dev_err(dev, "unknown request type 0x%02x\n", op->type);
+               return -EINVAL;
+       }
+
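+       /*
+        * The request payload is a little-endian length followed by 'len'
+        * bytes of message text in receive->msg.
+        */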
+       /* Verify size of payload */
+       if (op->request->payload_size < sizeof(*receive)) {
+               dev_err(dev, "log request too small (%zu < %zu)\n",
+                               op->request->payload_size, sizeof(*receive));
+               return -EINVAL;
+       }
+       receive = op->request->payload;
+       len = le16_to_cpu(receive->len);
+       if (len != (int)(op->request->payload_size - sizeof(*receive))) {
+               dev_err(dev, "log request wrong size %d vs %d\n", len,
+                               (int)(op->request->payload_size - sizeof(*receive)));
+               return -EINVAL;
+       }
+       if (len == 0) {
+               dev_err(dev, "log request of 0 bytes?\n");
+               return -EINVAL;
+       }
+
+       if (len > GB_LOG_MAX_LEN) {
+               dev_err(dev, "log request too big: %d\n", len);
+               return -EINVAL;
+       }
+
+       /* Ensure the buffer is 0 terminated */
+       receive->msg[len - 1] = '\0';
+
+       /*
+        * Print with dev_dbg() so that it can easily be turned off using
+        * dynamic debugging (and to prevent any DoS).
+        */
+       dev_dbg(dev, "%s", receive->msg);
+
+       return 0;
+}
+
+static int gb_log_probe(struct gb_bundle *bundle,
+                       const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_log *log;
+       int retval;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOG)
+               return -ENODEV;
+
+       log = kzalloc(sizeof(*log), GFP_KERNEL);
+       if (!log)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                       gb_log_request_handler);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto error_free;
+       }
+
+       log->connection = connection;
+       greybus_set_drvdata(bundle, log);
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto error_connection_destroy;
+
+       return 0;
+
+error_connection_destroy:
+       gb_connection_destroy(connection);
+error_free:
+       kfree(log);
+       return retval;
+}
+
+static void gb_log_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_log *log = greybus_get_drvdata(bundle);
+       struct gb_connection *connection = log->connection;
+
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+
+       kfree(log);
+}
+
+static const struct greybus_bundle_id gb_log_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOG) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_log_id_table);
+
+static struct greybus_driver gb_log_driver = {
+       .name           = "log",
+       .probe          = gb_log_probe,
+       .disconnect     = gb_log_disconnect,
+       .id_table       = gb_log_id_table,
+};
+module_greybus_driver(gb_log_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
new file mode 100644 (file)
index 0000000..8b0d0dc
--- /dev/null
@@ -0,0 +1,1365 @@
+/*
+ * Loopback bridge driver for the Greybus loopback module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/div64.h>
+
+#include "greybus.h"
+#include "connection.h"
+
+#define NSEC_PER_DAY 86400000000000ULL
+
+struct gb_loopback_stats {
+       u32 min;
+       u32 max;
+       u64 sum;
+       u32 count;
+};
+
+struct gb_loopback_device {
+       struct dentry *root;
+       u32 count;
+       size_t size_max;
+
+       /* We need to take a lock in atomic context */
+       spinlock_t lock;
+       struct list_head list;
+       struct list_head list_op_async;
+       wait_queue_head_t wq;
+};
+
+static struct gb_loopback_device gb_dev;
+
+struct gb_loopback_async_operation {
+       struct gb_loopback *gb;
+       struct gb_operation *operation;
+       struct timeval ts;
+       struct timer_list timer;
+       struct list_head entry;
+       struct work_struct work;
+       struct kref kref;
+       bool pending;
+       int (*completion)(struct gb_loopback_async_operation *op_async);
+};
+
+struct gb_loopback {
+       struct gb_connection *connection;
+
+       struct dentry *file;
+       struct kfifo kfifo_lat;
+       struct kfifo kfifo_ts;
+       struct mutex mutex;
+       struct task_struct *task;
+       struct list_head entry;
+       struct device *dev;
+       wait_queue_head_t wq;
+       wait_queue_head_t wq_completion;
+       atomic_t outstanding_operations;
+
+       /* Per connection stats */
+       struct timeval ts;
+       struct gb_loopback_stats latency;
+       struct gb_loopback_stats throughput;
+       struct gb_loopback_stats requests_per_second;
+       struct gb_loopback_stats apbridge_unipro_latency;
+       struct gb_loopback_stats gbphy_firmware_latency;
+
+       int type;
+       int async;
+       int id;
+       u32 size;
+       u32 iteration_max;
+       u32 iteration_count;
+       int us_wait;
+       u32 error;
+       u32 requests_completed;
+       u32 requests_timedout;
+       u32 timeout;
+       u32 jiffy_timeout;
+       u32 timeout_min;
+       u32 timeout_max;
+       u32 outstanding_operations_max;
+       u32 lbid;
+       u64 elapsed_nsecs;
+       u32 apbridge_latency_ts;
+       u32 gbphy_latency_ts;
+
+       u32 send_count;
+};
+
+static struct class loopback_class = {
+       .name           = "gb_loopback",
+       .owner          = THIS_MODULE,
+};
+static DEFINE_IDA(loopback_ida);
+
+/* Min/max values in jiffies */
+#define GB_LOOPBACK_TIMEOUT_MIN                                1
+#define GB_LOOPBACK_TIMEOUT_MAX                                10000
+
+#define GB_LOOPBACK_FIFO_DEFAULT                       8192
+
+static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
+module_param(kfifo_depth, uint, 0444);
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
+
+#define GB_LOOPBACK_US_WAIT_MAX                                1000000
+
+/* interface sysfs attributes */
+#define gb_loopback_ro_attr(field)                             \
+static ssize_t field##_show(struct device *dev,                        \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       return sprintf(buf, "%u\n", gb->field);                 \
+}                                                                      \
+static DEVICE_ATTR_RO(field)
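+/*
+ * For example, gb_loopback_ro_attr(error) (used below) expands to an
+ * error_show() callback printing gb->error and a matching read-only
+ * dev_attr_error attribute.
+ */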
+
+#define gb_loopback_ro_stats_attr(name, field, type)           \
+static ssize_t name##_##field##_show(struct device *dev,       \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       /* Report 0 for min and max if no transfer succeeded */         \
+       if (!gb->requests_completed)                                    \
+               return sprintf(buf, "0\n");                             \
+       return sprintf(buf, "%"#type"\n", gb->name.field);      \
+}                                                                      \
+static DEVICE_ATTR_RO(name##_##field)
+
+#define gb_loopback_ro_avg_attr(name)                  \
+static ssize_t name##_avg_show(struct device *dev,             \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback_stats *stats;                                \
+       struct gb_loopback *gb;                                         \
+       u64 avg, rem;                                                   \
+       u32 count;                                                      \
+       gb = dev_get_drvdata(dev);                      \
+       stats = &gb->name;                                      \
+       count = stats->count ? stats->count : 1;                        \
+       avg = stats->sum + count / 2000000; /* round closest */         \
+       rem = do_div(avg, count);                                       \
+       rem *= 1000000;                                                 \
+       do_div(rem, count);                                             \
+       return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);              \
+}                                                                      \
+static DEVICE_ATTR_RO(name##_avg)
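+/*
+ * The average is emitted as a fixed-point decimal with six fractional digits
+ * (sum/count, remainder scaled by 10^6), e.g. an illustrative latency_avg
+ * read of "1234.500000".
+ */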
+
+#define gb_loopback_stats_attrs(field)                         \
+       gb_loopback_ro_stats_attr(field, min, u);               \
+       gb_loopback_ro_stats_attr(field, max, u);               \
+       gb_loopback_ro_avg_attr(field)
+
+#define gb_loopback_attr(field, type)                                  \
+static ssize_t field##_show(struct device *dev,                                \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       return sprintf(buf, "%"#type"\n", gb->field);                   \
+}                                                                      \
+static ssize_t field##_store(struct device *dev,                       \
+                           struct device_attribute *attr,              \
+                           const char *buf,                            \
+                           size_t len)                                 \
+{                                                                      \
+       int ret;                                                        \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       mutex_lock(&gb->mutex);                                         \
+       ret = sscanf(buf, "%"#type, &gb->field);                        \
+       if (ret != 1)                                                   \
+               len = -EINVAL;                                          \
+       else                                                            \
+               gb_loopback_check_attr(gb);                             \
+       mutex_unlock(&gb->mutex);                                       \
+       return len;                                                     \
+}                                                                      \
+static DEVICE_ATTR_RW(field)
+
+#define gb_dev_loopback_ro_attr(field, conn)                           \
+static ssize_t field##_show(struct device *dev,                \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       return sprintf(buf, "%u\n", gb->field);                         \
+}                                                                      \
+static DEVICE_ATTR_RO(field)
+
+#define gb_dev_loopback_rw_attr(field, type)                           \
+static ssize_t field##_show(struct device *dev,                                \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       return sprintf(buf, "%"#type"\n", gb->field);                   \
+}                                                                      \
+static ssize_t field##_store(struct device *dev,                       \
+                           struct device_attribute *attr,              \
+                           const char *buf,                            \
+                           size_t len)                                 \
+{                                                                      \
+       int ret;                                                        \
+       struct gb_loopback *gb = dev_get_drvdata(dev);                  \
+       mutex_lock(&gb->mutex);                                         \
+       ret = sscanf(buf, "%"#type, &gb->field);                        \
+       if (ret != 1)                                                   \
+               len = -EINVAL;                                          \
+       else                                                            \
+               gb_loopback_check_attr(gb);             \
+       mutex_unlock(&gb->mutex);                                       \
+       return len;                                                     \
+}                                                                      \
+static DEVICE_ATTR_RW(field)
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb);
+static void gb_loopback_check_attr(struct gb_loopback *gb)
+{
+       if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
+               gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
+       if (gb->size > gb_dev.size_max)
+               gb->size = gb_dev.size_max;
+       gb->requests_timedout = 0;
+       gb->requests_completed = 0;
+       gb->iteration_count = 0;
+       gb->send_count = 0;
+       gb->error = 0;
+
+       if (kfifo_depth < gb->iteration_max) {
+               dev_warn(gb->dev,
+                        "iteration_max %u exceeds kfifo_depth %u; not all results will be logged\n",
+                        gb->iteration_max, kfifo_depth);
+       }
+       kfifo_reset_out(&gb->kfifo_lat);
+       kfifo_reset_out(&gb->kfifo_ts);
+
+       switch (gb->type) {
+       case GB_LOOPBACK_TYPE_PING:
+       case GB_LOOPBACK_TYPE_TRANSFER:
+       case GB_LOOPBACK_TYPE_SINK:
+               gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
+               if (!gb->jiffy_timeout)
+                       gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
+               else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
+                       gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
+               gb_loopback_reset_stats(gb);
+               wake_up(&gb->wq);
+               break;
+       default:
+               gb->type = 0;
+               break;
+       }
+}
+
+/* Time to send and receive one message */
+gb_loopback_stats_attrs(latency);
+/* Number of requests sent per second on this cport */
+gb_loopback_stats_attrs(requests_per_second);
+/* Quantity of data sent and received on this cport */
+gb_loopback_stats_attrs(throughput);
+/* Latency across the UniPro link from APBridge's perspective */
+gb_loopback_stats_attrs(apbridge_unipro_latency);
+/* Firmware induced overhead in the GPBridge */
+gb_loopback_stats_attrs(gbphy_firmware_latency);
+
+/* Number of errors encountered during loop */
+gb_loopback_ro_attr(error);
+/* Number of requests successfully completed async */
+gb_loopback_ro_attr(requests_completed);
+/* Number of requests timed out async */
+gb_loopback_ro_attr(requests_timedout);
+/* Timeout minimum in useconds */
+gb_loopback_ro_attr(timeout_min);
+/* Timeout maximum in useconds */
+gb_loopback_ro_attr(timeout_max);
+
+/*
+ * Type of loopback message to send based on protocol type definitions
+ * 0 => Don't send message
+ * 2 => Send ping message continuously (message without payload)
+ * 3 => Send transfer message continuously (message with payload,
+ *                                        payload returned in response)
+ * 4 => Send a sink message (message with payload, no payload in response)
+ */
+gb_dev_loopback_rw_attr(type, d);
+/* Size of transfer message payload: 0-4096 bytes */
+gb_dev_loopback_rw_attr(size, u);
+/* Time to wait between two messages: 0-1000 ms */
+gb_dev_loopback_rw_attr(us_wait, d);
+/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
+gb_dev_loopback_rw_attr(iteration_max, u);
+/* The current index of the for (i = 0; i < iteration_max; i++) loop */
+gb_dev_loopback_ro_attr(iteration_count, false);
+/* A flag to indicate synchronous or asynchronous operations */
+gb_dev_loopback_rw_attr(async, u);
+/* Timeout of an individual asynchronous request */
+gb_dev_loopback_rw_attr(timeout, u);
+/* Maximum number of in-flight operations before back-off */
+gb_dev_loopback_rw_attr(outstanding_operations_max, u);
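+/*
+ * Illustrative usage from userspace (the exact sysfs path is an assumption
+ * and depends on how the loopback device was enumerated):
+ *
+ *   echo 100 > .../iteration_max      # number of operations to run
+ *   echo 128 > .../size               # transfer payload size in bytes
+ *   echo 3   > .../type               # start sending transfer messages
+ *   cat .../latency_avg               # read back the average latency
+ */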
+
+static struct attribute *loopback_attrs[] = {
+       &dev_attr_latency_min.attr,
+       &dev_attr_latency_max.attr,
+       &dev_attr_latency_avg.attr,
+       &dev_attr_requests_per_second_min.attr,
+       &dev_attr_requests_per_second_max.attr,
+       &dev_attr_requests_per_second_avg.attr,
+       &dev_attr_throughput_min.attr,
+       &dev_attr_throughput_max.attr,
+       &dev_attr_throughput_avg.attr,
+       &dev_attr_apbridge_unipro_latency_min.attr,
+       &dev_attr_apbridge_unipro_latency_max.attr,
+       &dev_attr_apbridge_unipro_latency_avg.attr,
+       &dev_attr_gbphy_firmware_latency_min.attr,
+       &dev_attr_gbphy_firmware_latency_max.attr,
+       &dev_attr_gbphy_firmware_latency_avg.attr,
+       &dev_attr_type.attr,
+       &dev_attr_size.attr,
+       &dev_attr_us_wait.attr,
+       &dev_attr_iteration_count.attr,
+       &dev_attr_iteration_max.attr,
+       &dev_attr_async.attr,
+       &dev_attr_error.attr,
+       &dev_attr_requests_completed.attr,
+       &dev_attr_requests_timedout.attr,
+       &dev_attr_timeout.attr,
+       &dev_attr_outstanding_operations_max.attr,
+       &dev_attr_timeout_min.attr,
+       &dev_attr_timeout_max.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(loopback);
+
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
+
+static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
+{
+       u32 lat;
+
+       do_div(elapsed_nsecs, NSEC_PER_USEC);
+       lat = elapsed_nsecs;
+       return lat;
+}
+
+static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
+{
+       if (t2 > t1)
+               return t2 - t1;
+       else
+               return NSEC_PER_DAY - t2 + t1;
+}
+
+static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
+{
+       u64 t1, t2;
+
+       t1 = timeval_to_ns(ts);
+       t2 = timeval_to_ns(te);
+
+       return __gb_loopback_calc_latency(t1, t2);
+}
+
+static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
+                                       struct timeval *ts, struct timeval *te)
+{
+       kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
+       kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
+}
+
+static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
+                                     void *request, int request_size,
+                                     void *response, int response_size)
+{
+       struct gb_operation *operation;
+       struct timeval ts, te;
+       int ret;
+
+       do_gettimeofday(&ts);
+       operation = gb_operation_create(gb->connection, type, request_size,
+                                       response_size, GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret) {
+               dev_err(&gb->connection->bundle->dev,
+                       "synchronous operation failed: %d\n", ret);
+               goto out_put_operation;
+       } else {
+               if (response_size == operation->response->payload_size) {
+                       memcpy(response, operation->response->payload,
+                              response_size);
+               } else {
+                       dev_err(&gb->connection->bundle->dev,
+                               "response size %zu expected %d\n",
+                               operation->response->payload_size,
+                               response_size);
+                       ret = -EINVAL;
+                       goto out_put_operation;
+               }
+       }
+
+       do_gettimeofday(&te);
+
+       /* Calculate the total time the message took */
+       gb_loopback_push_latency_ts(gb, &ts, &te);
+       gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
+
+out_put_operation:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static void __gb_loopback_async_operation_destroy(struct kref *kref)
+{
+       struct gb_loopback_async_operation *op_async;
+
+       op_async = container_of(kref, struct gb_loopback_async_operation, kref);
+
+       list_del(&op_async->entry);
+       if (op_async->operation)
+               gb_operation_put(op_async->operation);
+       atomic_dec(&op_async->gb->outstanding_operations);
+       wake_up(&op_async->gb->wq_completion);
+       kfree(op_async);
+}
+
+static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
+                                           *op_async)
+{
+       kref_get(&op_async->kref);
+}
+
+static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
+                                           *op_async)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&gb_dev.lock, flags);
+       kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
+       spin_unlock_irqrestore(&gb_dev.lock, flags);
+}
+
+static struct gb_loopback_async_operation *
+       gb_loopback_operation_find(u16 id)
+{
+       struct gb_loopback_async_operation *op_async;
+       bool found = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gb_dev.lock, flags);
+       list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
+               if (op_async->operation->id == id) {
+                       gb_loopback_async_operation_get(op_async);
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+       return found ? op_async : NULL;
+}
+
+static void gb_loopback_async_wait_all(struct gb_loopback *gb)
+{
+       wait_event(gb->wq_completion,
+                  !atomic_read(&gb->outstanding_operations));
+}
+
+static void gb_loopback_async_operation_callback(struct gb_operation *operation)
+{
+       struct gb_loopback_async_operation *op_async;
+       struct gb_loopback *gb;
+       struct timeval te;
+       bool err = false;
+
+       do_gettimeofday(&te);
+       op_async = gb_loopback_operation_find(operation->id);
+       if (!op_async)
+               return;
+
+       gb = op_async->gb;
+       mutex_lock(&gb->mutex);
+
+       if (!op_async->pending || gb_operation_result(operation)) {
+               err = true;
+       } else {
+               if (op_async->completion)
+                       if (op_async->completion(op_async))
+                               err = true;
+       }
+
+       if (!err) {
+               gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
+               gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
+                                                            &te);
+       }
+
+       if (op_async->pending) {
+               if (err)
+                       gb->error++;
+               gb->iteration_count++;
+               op_async->pending = false;
+               del_timer_sync(&op_async->timer);
+               gb_loopback_async_operation_put(op_async);
+               gb_loopback_calculate_stats(gb, err);
+       }
+       mutex_unlock(&gb->mutex);
+
+       dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
+               operation->id);
+
+       gb_loopback_async_operation_put(op_async);
+}
+
+static void gb_loopback_async_operation_work(struct work_struct *work)
+{
+       struct gb_loopback *gb;
+       struct gb_operation *operation;
+       struct gb_loopback_async_operation *op_async;
+
+       op_async = container_of(work, struct gb_loopback_async_operation, work);
+       gb = op_async->gb;
+       operation = op_async->operation;
+
+       mutex_lock(&gb->mutex);
+       if (op_async->pending) {
+               gb->requests_timedout++;
+               gb->error++;
+               gb->iteration_count++;
+               op_async->pending = false;
+               gb_loopback_async_operation_put(op_async);
+               gb_loopback_calculate_stats(gb, true);
+       }
+       mutex_unlock(&gb->mutex);
+
+       dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
+               operation->id);
+
+       gb_operation_cancel(operation, -ETIMEDOUT);
+       gb_loopback_async_operation_put(op_async);
+}
+
+static void gb_loopback_async_operation_timeout(unsigned long data)
+{
+       struct gb_loopback_async_operation *op_async;
+       u16 id = data;
+
+       op_async = gb_loopback_operation_find(id);
+       if (!op_async) {
+               pr_err("operation %d not found - timed out?\n", id);
+               return;
+       }
+       schedule_work(&op_async->work);
+}
+
+static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
+                                      void *request, int request_size,
+                                      int response_size,
+                                      void *completion)
+{
+       struct gb_loopback_async_operation *op_async;
+       struct gb_operation *operation;
+       int ret;
+       unsigned long flags;
+
+       op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
+       if (!op_async)
+               return -ENOMEM;
+
+       INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
+       init_timer(&op_async->timer);
+       kref_init(&op_async->kref);
+
+       operation = gb_operation_create(gb->connection, type, request_size,
+                                       response_size, GFP_KERNEL);
+       if (!operation) {
+               kfree(op_async);
+               return -ENOMEM;
+       }
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       op_async->gb = gb;
+       op_async->operation = operation;
+       op_async->completion = completion;
+
+       spin_lock_irqsave(&gb_dev.lock, flags);
+       list_add_tail(&op_async->entry, &gb_dev.list_op_async);
+       spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+       do_gettimeofday(&op_async->ts);
+       op_async->pending = true;
+       atomic_inc(&gb->outstanding_operations);
+       mutex_lock(&gb->mutex);
+       ret = gb_operation_request_send(operation,
+                                       gb_loopback_async_operation_callback,
+                                       GFP_KERNEL);
+       if (ret)
+               goto error;
+
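+       /*
+        * Arm a per-operation timeout; when it fires, op_async->work cancels
+        * the operation with -ETIMEDOUT.
+        */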
+       op_async->timer.function = gb_loopback_async_operation_timeout;
+       op_async->timer.expires = jiffies + gb->jiffy_timeout;
+       op_async->timer.data = (unsigned long)operation->id;
+       add_timer(&op_async->timer);
+
+       goto done;
+error:
+       gb_loopback_async_operation_put(op_async);
+done:
+       mutex_unlock(&gb->mutex);
+       return ret;
+}
+
+static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
+{
+       struct gb_loopback_transfer_request *request;
+       int retval;
+
+       request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       request->len = cpu_to_le32(len);
+       retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
+                                           request, len + sizeof(*request),
+                                           NULL, 0);
+       kfree(request);
+       return retval;
+}
+
+static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
+{
+       struct gb_loopback_transfer_request *request;
+       struct gb_loopback_transfer_response *response;
+       int retval;
+
+       gb->apbridge_latency_ts = 0;
+       gb->gbphy_latency_ts = 0;
+
+       request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+       response = kmalloc(len + sizeof(*response), GFP_KERNEL);
+       if (!response) {
+               kfree(request);
+               return -ENOMEM;
+       }
+
+       memset(request->data, 0x5A, len);
+
+       request->len = cpu_to_le32(len);
+       retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
+                                           request, len + sizeof(*request),
+                                           response, len + sizeof(*response));
+       if (retval)
+               goto gb_error;
+
+       if (memcmp(request->data, response->data, len)) {
+               dev_err(&gb->connection->bundle->dev,
+                       "Loopback Data doesn't match\n");
+               retval = -EREMOTEIO;
+       }
+       gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
+       gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
+
+gb_error:
+       kfree(request);
+       kfree(response);
+
+       return retval;
+}
+
+static int gb_loopback_sync_ping(struct gb_loopback *gb)
+{
+       return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
+                                         NULL, 0, NULL, 0);
+}
+
+static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
+{
+       struct gb_loopback_transfer_request *request;
+       int retval;
+
+       request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       request->len = cpu_to_le32(len);
+       retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
+                                            request, len + sizeof(*request),
+                                            0, NULL);
+       kfree(request);
+       return retval;
+}
+
+static int gb_loopback_async_transfer_complete(
+                               struct gb_loopback_async_operation *op_async)
+{
+       struct gb_loopback *gb;
+       struct gb_operation *operation;
+       struct gb_loopback_transfer_request *request;
+       struct gb_loopback_transfer_response *response;
+       size_t len;
+       int retval = 0;
+
+       gb = op_async->gb;
+       operation = op_async->operation;
+       request = operation->request->payload;
+       response = operation->response->payload;
+       len = le32_to_cpu(request->len);
+
+       if (memcmp(request->data, response->data, len)) {
+               dev_err(&gb->connection->bundle->dev,
+                       "Loopback Data doesn't match operation id %d\n",
+                       operation->id);
+               retval = -EREMOTEIO;
+       } else {
+               gb->apbridge_latency_ts =
+                       (u32)__le32_to_cpu(response->reserved0);
+               gb->gbphy_latency_ts =
+                       (u32)__le32_to_cpu(response->reserved1);
+       }
+
+       return retval;
+}
+
+static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
+{
+       struct gb_loopback_transfer_request *request;
+       int retval, response_len;
+
+       request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       memset(request->data, 0x5A, len);
+
+       request->len = cpu_to_le32(len);
+       response_len = sizeof(struct gb_loopback_transfer_response);
+       retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
+                                            request, len + sizeof(*request),
+                                            len + response_len,
+                                            gb_loopback_async_transfer_complete);
+       if (retval)
+               goto gb_error;
+
+gb_error:
+       kfree(request);
+       return retval;
+}
+
+static int gb_loopback_async_ping(struct gb_loopback *gb)
+{
+       return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
+                                          NULL, 0, 0, NULL);
+}
+
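+/*
+ * Incoming request handler: ping and sink requests need no response
+ * payload, while transfer requests echo the received data back so that
+ * the initiator can verify it end to end.
+ */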
+static int gb_loopback_request_handler(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       struct gb_loopback_transfer_request *request;
+       struct gb_loopback_transfer_response *response;
+       struct device *dev = &connection->bundle->dev;
+       size_t len;
+
+       /* By convention, the AP initiates the version operation */
+       switch (operation->type) {
+       case GB_LOOPBACK_TYPE_PING:
+       case GB_LOOPBACK_TYPE_SINK:
+               return 0;
+       case GB_LOOPBACK_TYPE_TRANSFER:
+               if (operation->request->payload_size < sizeof(*request)) {
+                       dev_err(dev, "transfer request too small (%zu < %zu)\n",
+                               operation->request->payload_size,
+                               sizeof(*request));
+                       return -EINVAL; /* -EMSGSIZE */
+               }
+               request = operation->request->payload;
+               len = le32_to_cpu(request->len);
+               if (len > gb_dev.size_max) {
+                       dev_err(dev, "transfer request too large (%zu > %zu)\n",
+                               len, gb_dev.size_max);
+                       return -EINVAL;
+               }
+
+               if (!gb_operation_response_alloc(operation,
+                               len + sizeof(*response), GFP_KERNEL)) {
+                       dev_err(dev, "error allocating response\n");
+                       return -ENOMEM;
+               }
+               response = operation->response->payload;
+               response->len = cpu_to_le32(len);
+               if (len)
+                       memcpy(response->data, request->data, len);
+
+               return 0;
+       default:
+               dev_err(dev, "unsupported request: %u\n", operation->type);
+               return -EINVAL;
+       }
+}
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb)
+{
+       struct gb_loopback_stats reset = {
+               .min = U32_MAX,
+       };
+
+       /* Reset per-connection stats */
+       memcpy(&gb->latency, &reset,
+              sizeof(struct gb_loopback_stats));
+       memcpy(&gb->throughput, &reset,
+              sizeof(struct gb_loopback_stats));
+       memcpy(&gb->requests_per_second, &reset,
+              sizeof(struct gb_loopback_stats));
+       memcpy(&gb->apbridge_unipro_latency, &reset,
+              sizeof(struct gb_loopback_stats));
+       memcpy(&gb->gbphy_firmware_latency, &reset,
+              sizeof(struct gb_loopback_stats));
+
+       /* Should be initialized at least once per transaction set */
+       gb->apbridge_latency_ts = 0;
+       gb->gbphy_latency_ts = 0;
+       memset(&gb->ts, 0, sizeof(struct timeval));
+}
+
+static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
+{
+       if (stats->min > val)
+               stats->min = val;
+       if (stats->max < val)
+               stats->max = val;
+       stats->sum += val;
+       stats->count++;
+}
+
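+/*
+ * Fold a whole measurement window into the running statistics: sum and
+ * count accumulate the raw totals, while min and max are tracked against
+ * the per-window average (val / count) rather than individual samples.
+ */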
+static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
+                                           u64 val, u32 count)
+{
+       stats->sum += val;
+       stats->count += count;
+
+       do_div(val, count);
+       if (stats->min > val)
+               stats->min = val;
+       if (stats->max < val)
+               stats->max = val;
+}
+
+static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
+{
+       u64 req = gb->requests_completed * USEC_PER_SEC;
+
+       gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
+}
+
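+/*
+ * Illustrative throughput accounting (summarizing the code below): every
+ * operation carries two operation message headers; SINK adds the request
+ * structure plus size bytes, and TRANSFER adds the request and response
+ * structures plus 2 * size bytes. The per-window byte total is scaled by
+ * USEC_PER_SEC and divided by the window latency in microseconds to give
+ * bytes per second.
+ */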
+static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
+{
+       u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
+
+       switch (gb->type) {
+       case GB_LOOPBACK_TYPE_PING:
+               break;
+       case GB_LOOPBACK_TYPE_SINK:
+               aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+                                 gb->size;
+               break;
+       case GB_LOOPBACK_TYPE_TRANSFER:
+               aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+                                 sizeof(struct gb_loopback_transfer_response) +
+                                 gb->size * 2;
+               break;
+       default:
+               return;
+       }
+
+       aggregate_size *= gb->requests_completed;
+       aggregate_size *= USEC_PER_SEC;
+       gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
+                                       latency);
+}
+
+static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
+{
+       u32 lat;
+
+       /* Express latency in terms of microseconds */
+       lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
+
+       /* Log latency statistic */
+       gb_loopback_update_stats(&gb->latency, lat);
+
+       /* Raw latency log on a per thread basis */
+       kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
+
+       /* Log the firmware supplied latency values */
+       gb_loopback_update_stats(&gb->apbridge_unipro_latency,
+                                gb->apbridge_latency_ts);
+       gb_loopback_update_stats(&gb->gbphy_firmware_latency,
+                                gb->gbphy_latency_ts);
+}
+
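+/*
+ * Per-operation bookkeeping for the synchronous path: latency is logged
+ * for every successful operation, while the throughput and
+ * requests-per-second windows are folded in roughly once per second (or
+ * when the final iteration completes) before the window is restarted.
+ */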
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
+{
+       u64 nlat;
+       u32 lat;
+       struct timeval te;
+
+       if (!error) {
+               gb->requests_completed++;
+               gb_loopback_calculate_latency_stats(gb);
+       }
+
+       do_gettimeofday(&te);
+       nlat = gb_loopback_calc_latency(&gb->ts, &te);
+       if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
+               lat = gb_loopback_nsec_to_usec_latency(nlat);
+
+               gb_loopback_throughput_update(gb, lat);
+               gb_loopback_requests_update(gb, lat);
+
+               if (gb->iteration_count != gb->iteration_max) {
+                       gb->ts = te;
+                       gb->requests_completed = 0;
+               }
+       }
+}
+
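+/*
+ * Throttle the asynchronous pipeline: when async mode is enabled and a
+ * non-zero outstanding_operations_max is set, block until the number of
+ * in-flight operations drops below that cap (or the thread is asked to
+ * stop). A cap of zero leaves the pipeline unthrottled.
+ */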
+static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
+{
+       if (!(gb->async && gb->outstanding_operations_max))
+               return;
+       wait_event_interruptible(gb->wq_completion,
+                                (atomic_read(&gb->outstanding_operations) <
+                                 gb->outstanding_operations_max) ||
+                                 kthread_should_stop());
+}
+
+static int gb_loopback_fn(void *data)
+{
+       int error = 0;
+       int us_wait = 0;
+       int type;
+       int ret;
+       u32 size;
+
+       struct gb_loopback *gb = data;
+       struct gb_bundle *bundle = gb->connection->bundle;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       while (1) {
+               if (!gb->type) {
+                       gb_pm_runtime_put_autosuspend(bundle);
+                       wait_event_interruptible(gb->wq, gb->type ||
+                                                kthread_should_stop());
+                       ret = gb_pm_runtime_get_sync(bundle);
+                       if (ret)
+                               return ret;
+               }
+
+               if (kthread_should_stop())
+                       break;
+
+               /* Limit the maximum number of in-flight async operations */
+               gb_loopback_async_wait_to_send(gb);
+               if (kthread_should_stop())
+                       break;
+
+               mutex_lock(&gb->mutex);
+
+               /* Optionally terminate */
+               if (gb->send_count == gb->iteration_max) {
+                       if (gb->iteration_count == gb->iteration_max) {
+                               gb->type = 0;
+                               gb->send_count = 0;
+                               sysfs_notify(&gb->dev->kobj, NULL,
+                                               "iteration_count");
+                       }
+                       mutex_unlock(&gb->mutex);
+                       continue;
+               }
+               size = gb->size;
+               us_wait = gb->us_wait;
+               type = gb->type;
+               if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
+                       do_gettimeofday(&gb->ts);
+               mutex_unlock(&gb->mutex);
+
+               /* Else there are operations to perform */
+               if (gb->async) {
+                       if (type == GB_LOOPBACK_TYPE_PING) {
+                               error = gb_loopback_async_ping(gb);
+                       } else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
+                               error = gb_loopback_async_transfer(gb, size);
+                       } else if (type == GB_LOOPBACK_TYPE_SINK) {
+                               error = gb_loopback_async_sink(gb, size);
+                       }
+
+                       if (error)
+                               gb->error++;
+               } else {
+                       /* We are effectively single threaded here */
+                       if (type == GB_LOOPBACK_TYPE_PING)
+                               error = gb_loopback_sync_ping(gb);
+                       else if (type == GB_LOOPBACK_TYPE_TRANSFER)
+                               error = gb_loopback_sync_transfer(gb, size);
+                       else if (type == GB_LOOPBACK_TYPE_SINK)
+                               error = gb_loopback_sync_sink(gb, size);
+
+                       if (error)
+                               gb->error++;
+                       gb->iteration_count++;
+                       gb_loopback_calculate_stats(gb, !!error);
+               }
+               gb->send_count++;
+               if (us_wait)
+                       udelay(us_wait);
+       }
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+}
+
+static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
+                                                struct kfifo *kfifo,
+                                                struct mutex *mutex)
+{
+       u32 latency;
+       int retval;
+
+       if (kfifo_len(kfifo) == 0) {
+               retval = -EAGAIN;
+               goto done;
+       }
+
+       mutex_lock(mutex);
+       retval = kfifo_out(kfifo, &latency, sizeof(latency));
+       if (retval > 0) {
+               seq_printf(s, "%u", latency);
+               retval = 0;
+       }
+       mutex_unlock(mutex);
+done:
+       return retval;
+}
+
+static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
+{
+       struct gb_loopback *gb = s->private;
+
+       return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
+                                                    &gb->mutex);
+}
+
+static int gb_loopback_latency_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, gb_loopback_dbgfs_latency_show,
+                          inode->i_private);
+}
+
+static const struct file_operations gb_loopback_debugfs_latency_ops = {
+       .open           = gb_loopback_latency_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
+                                     struct list_head *lhb)
+{
+       struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
+       struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
+       struct gb_connection *ca = a->connection;
+       struct gb_connection *cb = b->connection;
+
+       if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
+               return -1;
+       if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
+               return 1;
+       if (ca->bundle->id < cb->bundle->id)
+               return -1;
+       if (cb->bundle->id < ca->bundle->id)
+               return 1;
+       if (ca->intf_cport_id < cb->intf_cport_id)
+               return -1;
+       else if (cb->intf_cport_id < ca->intf_cport_id)
+               return 1;
+
+       return 0;
+}
+
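+/*
+ * Keep the device list sorted by interface, bundle and cport id, and give
+ * each connection a one-hot loopback id (0x1, 0x2, 0x4, ...) reflecting
+ * its position in the sorted list.
+ */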
+static void gb_loopback_insert_id(struct gb_loopback *gb)
+{
+       struct gb_loopback *gb_list;
+       u32 new_lbid = 0;
+
+       /* perform an insertion sort */
+       list_add_tail(&gb->entry, &gb_dev.list);
+       list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
+       list_for_each_entry(gb_list, &gb_dev.list, entry) {
+               gb_list->lbid = 1 << new_lbid;
+               new_lbid++;
+       }
+}
+
+#define DEBUGFS_NAMELEN 32
+
+static int gb_loopback_probe(struct gb_bundle *bundle,
+                            const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_loopback *gb;
+       struct device *dev;
+       int retval;
+       char name[DEBUGFS_NAMELEN];
+       unsigned long flags;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
+               return -ENODEV;
+
+       gb = kzalloc(sizeof(*gb), GFP_KERNEL);
+       if (!gb)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                         gb_loopback_request_handler);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto out_kzalloc;
+       }
+
+       gb->connection = connection;
+       greybus_set_drvdata(bundle, gb);
+
+       init_waitqueue_head(&gb->wq);
+       init_waitqueue_head(&gb->wq_completion);
+       atomic_set(&gb->outstanding_operations, 0);
+       gb_loopback_reset_stats(gb);
+
+       /* Values reported to user-space for the min/max timeouts */
+       gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
+       gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
+
+       if (!gb_dev.count) {
+               /* Calculate maximum payload */
+               gb_dev.size_max = gb_operation_get_payload_size_max(connection);
+               if (gb_dev.size_max <=
+                       sizeof(struct gb_loopback_transfer_request)) {
+                       retval = -EINVAL;
+                       goto out_connection_destroy;
+               }
+               gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
+       }
+
+       /* Create per-connection sysfs and debugfs data-points */
+       snprintf(name, sizeof(name), "raw_latency_%s",
+                dev_name(&connection->bundle->dev));
+       gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
+                                      &gb_loopback_debugfs_latency_ops);
+
+       gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
+       if (gb->id < 0) {
+               retval = gb->id;
+               goto out_debugfs_remove;
+       }
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto out_ida_remove;
+
+       dev = device_create_with_groups(&loopback_class,
+                                       &connection->bundle->dev,
+                                       MKDEV(0, 0), gb, loopback_groups,
+                                       "gb_loopback%d", gb->id);
+       if (IS_ERR(dev)) {
+               retval = PTR_ERR(dev);
+               goto out_connection_disable;
+       }
+       gb->dev = dev;
+
+       /* Allocate kfifo */
+       if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
+                         GFP_KERNEL)) {
+               retval = -ENOMEM;
+               goto out_conn;
+       }
+       if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
+                         GFP_KERNEL)) {
+               retval = -ENOMEM;
+               goto out_kfifo0;
+       }
+
+       /* Fork worker thread */
+       mutex_init(&gb->mutex);
+       gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
+       if (IS_ERR(gb->task)) {
+               retval = PTR_ERR(gb->task);
+               goto out_kfifo1;
+       }
+
+       spin_lock_irqsave(&gb_dev.lock, flags);
+       gb_loopback_insert_id(gb);
+       gb_dev.count++;
+       spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+       gb_connection_latency_tag_enable(connection);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+out_kfifo1:
+       kfifo_free(&gb->kfifo_ts);
+out_kfifo0:
+       kfifo_free(&gb->kfifo_lat);
+out_conn:
+       device_unregister(dev);
+out_connection_disable:
+       gb_connection_disable(connection);
+out_ida_remove:
+       ida_simple_remove(&loopback_ida, gb->id);
+out_debugfs_remove:
+       debugfs_remove(gb->file);
+out_connection_destroy:
+       gb_connection_destroy(connection);
+out_kzalloc:
+       kfree(gb);
+
+       return retval;
+}
+
+static void gb_loopback_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_loopback *gb = greybus_get_drvdata(bundle);
+       unsigned long flags;
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               gb_pm_runtime_get_noresume(bundle);
+
+       gb_connection_disable(gb->connection);
+
+       if (!IS_ERR_OR_NULL(gb->task))
+               kthread_stop(gb->task);
+
+       kfifo_free(&gb->kfifo_lat);
+       kfifo_free(&gb->kfifo_ts);
+       gb_connection_latency_tag_disable(gb->connection);
+       debugfs_remove(gb->file);
+
+       /*
+        * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
+        * is disabled at the beginning and so we can't have any more
+        * incoming/outgoing requests.
+        */
+       gb_loopback_async_wait_all(gb);
+
+       spin_lock_irqsave(&gb_dev.lock, flags);
+       gb_dev.count--;
+       list_del(&gb->entry);
+       spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+       device_unregister(gb->dev);
+       ida_simple_remove(&loopback_ida, gb->id);
+
+       gb_connection_destroy(gb->connection);
+       kfree(gb);
+}
+
+static const struct greybus_bundle_id gb_loopback_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
+
+static struct greybus_driver gb_loopback_driver = {
+       .name           = "loopback",
+       .probe          = gb_loopback_probe,
+       .disconnect     = gb_loopback_disconnect,
+       .id_table       = gb_loopback_id_table,
+};
+
+static int loopback_init(void)
+{
+       int retval;
+
+       INIT_LIST_HEAD(&gb_dev.list);
+       INIT_LIST_HEAD(&gb_dev.list_op_async);
+       spin_lock_init(&gb_dev.lock);
+       gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
+
+       retval = class_register(&loopback_class);
+       if (retval)
+               goto err;
+
+       retval = greybus_register(&gb_loopback_driver);
+       if (retval)
+               goto err_unregister;
+
+       return 0;
+
+err_unregister:
+       class_unregister(&loopback_class);
+err:
+       debugfs_remove_recursive(gb_dev.root);
+       return retval;
+}
+module_init(loopback_init);
+
+static void __exit loopback_exit(void)
+{
+       debugfs_remove_recursive(gb_dev.root);
+       greybus_deregister(&gb_loopback_driver);
+       class_unregister(&loopback_class);
+       ida_destroy(&loopback_ida);
+}
+module_exit(loopback_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
new file mode 100644 (file)
index 0000000..3d1592f
--- /dev/null
@@ -0,0 +1,535 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+static const char *get_descriptor_type_string(u8 type)
+{
+       switch (type) {
+       case GREYBUS_TYPE_INVALID:
+               return "invalid";
+       case GREYBUS_TYPE_STRING:
+               return "string";
+       case GREYBUS_TYPE_INTERFACE:
+               return "interface";
+       case GREYBUS_TYPE_CPORT:
+               return "cport";
+       case GREYBUS_TYPE_BUNDLE:
+               return "bundle";
+       default:
+               WARN_ON(1);
+               return "unknown";
+       }
+}
+
+/*
+ * We scan the manifest once to identify where all the descriptors
+ * are.  The result is a list of these manifest_desc structures.  We
+ * then pick through them for what we're looking for (starting with
+ * the interface descriptor).  As each is processed we remove it from
+ * the list.  When we're done the list should (probably) be empty.
+ */
+struct manifest_desc {
+       struct list_head                links;
+
+       size_t                          size;
+       void                            *data;
+       enum greybus_descriptor_type    type;
+};
+
+static void release_manifest_descriptor(struct manifest_desc *descriptor)
+{
+       list_del(&descriptor->links);
+       kfree(descriptor);
+}
+
+static void release_manifest_descriptors(struct gb_interface *intf)
+{
+       struct manifest_desc *descriptor;
+       struct manifest_desc *next;
+
+       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+               release_manifest_descriptor(descriptor);
+}
+
+static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
+{
+       struct manifest_desc *desc, *tmp;
+       struct greybus_descriptor_cport *desc_cport;
+
+       list_for_each_entry_safe(desc, tmp, head, links) {
+               desc_cport = desc->data;
+
+               if (desc->type != GREYBUS_TYPE_CPORT)
+                       continue;
+
+               if (desc_cport->bundle == bundle_id)
+                       release_manifest_descriptor(desc);
+       }
+}
+
+static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
+{
+       struct manifest_desc *descriptor;
+       struct manifest_desc *next;
+
+       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+               if (descriptor->type == GREYBUS_TYPE_BUNDLE)
+                       return descriptor;
+
+       return NULL;
+}
+
+/*
+ * Validate the given descriptor.  Its reported size must fit within
+ * the number of bytes remaining, and it must have a recognized
+ * type.  Check that the reported size is at least as big as what
+ * we expect to see.  (It could be bigger, perhaps for a new version
+ * of the format.)
+ *
+ * Returns the (non-zero) number of bytes consumed by the descriptor,
+ * or a negative errno.
+ */
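+/*
+ * Illustrative sizing, derived from the checks below: for a string
+ * descriptor the expected size is sizeof(*desc_header) +
+ * sizeof(struct greybus_descriptor_string) + desc->string.length, rounded
+ * up to a 4-byte boundary; a descriptor reporting anything smaller is
+ * rejected, while a larger one is accepted with a warning.
+ */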
+static int identify_descriptor(struct gb_interface *intf,
+                              struct greybus_descriptor *desc, size_t size)
+{
+       struct greybus_descriptor_header *desc_header = &desc->header;
+       struct manifest_desc *descriptor;
+       size_t desc_size;
+       size_t expected_size;
+
+       if (size < sizeof(*desc_header)) {
+               dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
+                               size, sizeof(*desc_header));
+               return -EINVAL;         /* Must at least have header */
+       }
+
+       desc_size = le16_to_cpu(desc_header->size);
+       if (desc_size > size) {
+               dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
+                               desc_size, size);
+               return -EINVAL;
+       }
+
+       /* Descriptor needs to at least have a header */
+       expected_size = sizeof(*desc_header);
+
+       switch (desc_header->type) {
+       case GREYBUS_TYPE_STRING:
+               expected_size += sizeof(struct greybus_descriptor_string);
+               expected_size += desc->string.length;
+
+               /* String descriptors are padded to 4 byte boundaries */
+               expected_size = ALIGN(expected_size, 4);
+               break;
+       case GREYBUS_TYPE_INTERFACE:
+               expected_size += sizeof(struct greybus_descriptor_interface);
+               break;
+       case GREYBUS_TYPE_BUNDLE:
+               expected_size += sizeof(struct greybus_descriptor_bundle);
+               break;
+       case GREYBUS_TYPE_CPORT:
+               expected_size += sizeof(struct greybus_descriptor_cport);
+               break;
+       case GREYBUS_TYPE_INVALID:
+       default:
+               dev_err(&intf->dev, "invalid descriptor type (%u)\n",
+                               desc_header->type);
+               return -EINVAL;
+       }
+
+       if (desc_size < expected_size) {
+               dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
+                               get_descriptor_type_string(desc_header->type),
+                               desc_size, expected_size);
+               return -EINVAL;
+       }
+
+       /* Descriptor bigger than what we expect */
+       if (desc_size > expected_size) {
+               dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
+                               get_descriptor_type_string(desc_header->type),
+                               expected_size, desc_size);
+       }
+
+       descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+       if (!descriptor)
+               return -ENOMEM;
+
+       descriptor->size = desc_size;
+       descriptor->data = (char *)desc + sizeof(*desc_header);
+       descriptor->type = desc_header->type;
+       list_add_tail(&descriptor->links, &intf->manifest_descs);
+
+       /* desc_size is positive and is known to fit in a signed int */
+
+       return desc_size;
+}
+
+/*
+ * Find the string descriptor having the given id, validate it, and
+ * allocate a duplicate copy of it.  The duplicate has an extra byte
+ * which guarantees the returned string is NUL-terminated.
+ *
+ * String index 0 is valid (it represents "no string"), and for
+ * that a null pointer is returned.
+ *
+ * Otherwise returns a pointer to a newly-allocated copy of the
+ * descriptor string, or an error-coded pointer on failure.
+ */
+static char *gb_string_get(struct gb_interface *intf, u8 string_id)
+{
+       struct greybus_descriptor_string *desc_string;
+       struct manifest_desc *descriptor;
+       bool found = false;
+       char *string;
+
+       /* A zero string id means no string (but no error) */
+       if (!string_id)
+               return NULL;
+
+       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+               if (descriptor->type != GREYBUS_TYPE_STRING)
+                       continue;
+
+               desc_string = descriptor->data;
+               if (desc_string->id == string_id) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found)
+               return ERR_PTR(-ENOENT);
+
+       /* Allocate an extra byte so we can guarantee it's NUL-terminated */
+       string = kmemdup(&desc_string->string, desc_string->length + 1,
+                               GFP_KERNEL);
+       if (!string)
+               return ERR_PTR(-ENOMEM);
+       string[desc_string->length] = '\0';
+
+       /* Ok we've used this string, so we're done with it */
+       release_manifest_descriptor(descriptor);
+
+       return string;
+}
+
+/*
+ * Find cport descriptors in the manifest associated with the given
+ * bundle, and set up data structures for the functions that use
+ * them.  Returns the number of cports set up for the bundle, or 0
+ * if there is an error.
+ */
+static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
+{
+       struct gb_interface *intf = bundle->intf;
+       struct greybus_descriptor_cport *desc_cport;
+       struct manifest_desc *desc, *next, *tmp;
+       LIST_HEAD(list);
+       u8 bundle_id = bundle->id;
+       u16 cport_id;
+       u32 count = 0;
+       int i;
+
+       /* Set up all cport descriptors associated with this bundle */
+       list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
+               if (desc->type != GREYBUS_TYPE_CPORT)
+                       continue;
+
+               desc_cport = desc->data;
+               if (desc_cport->bundle != bundle_id)
+                       continue;
+
+               cport_id = le16_to_cpu(desc_cport->id);
+               if (cport_id > CPORT_ID_MAX)
+                       goto exit;
+
+               /* Nothing else should use the control cport id as its cport_id */
+               if (cport_id == GB_CONTROL_CPORT_ID) {
+                       dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
+                               cport_id);
+                       goto exit;
+               }
+
+               /*
+                * Found one, move it to our temporary list after checking for
+                * duplicates.
+                */
+               list_for_each_entry(tmp, &list, links) {
+                       desc_cport = tmp->data;
+                       if (cport_id == le16_to_cpu(desc_cport->id)) {
+                               dev_err(&bundle->dev,
+                                               "duplicate CPort %u found\n",
+                                               cport_id);
+                               goto exit;
+                       }
+               }
+               list_move_tail(&desc->links, &list);
+               count++;
+       }
+
+       if (!count)
+               return 0;
+
+       bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
+                                       GFP_KERNEL);
+       if (!bundle->cport_desc)
+               goto exit;
+
+       bundle->num_cports = count;
+
+       i = 0;
+       list_for_each_entry_safe(desc, next, &list, links) {
+               desc_cport = desc->data;
+               memcpy(&bundle->cport_desc[i++], desc_cport,
+                               sizeof(*desc_cport));
+
+               /* Release the cport descriptor */
+               release_manifest_descriptor(desc);
+       }
+
+       return count;
+exit:
+       release_cport_descriptors(&list, bundle_id);
+       /*
+        * Free all cports for this bundle to avoid 'excess descriptors'
+        * warnings.
+        */
+       release_cport_descriptors(&intf->manifest_descs, bundle_id);
+
+       return 0;       /* Error; count should also be 0 */
+}
+
+/*
+ * Find bundle descriptors in the manifest and set up their data
+ * structures.  Returns the number of bundles set up for the
+ * given interface.
+ */
+static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
+{
+       struct manifest_desc *desc;
+       struct gb_bundle *bundle;
+       struct gb_bundle *bundle_next;
+       u32 count = 0;
+       u8 bundle_id;
+       u8 class;
+
+       while ((desc = get_next_bundle_desc(intf))) {
+               struct greybus_descriptor_bundle *desc_bundle;
+
+               /* Found one.  Set up its bundle structure */
+               desc_bundle = desc->data;
+               bundle_id = desc_bundle->id;
+               class = desc_bundle->class;
+
+               /* Done with this bundle descriptor */
+               release_manifest_descriptor(desc);
+
+               /* Ignore any legacy control bundles */
+               if (bundle_id == GB_CONTROL_BUNDLE_ID) {
+                       dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
+                                       __func__);
+                       release_cport_descriptors(&intf->manifest_descs,
+                                                               bundle_id);
+                       continue;
+               }
+
+               /* Nothing else should have its class set to control class */
+               if (class == GREYBUS_CLASS_CONTROL) {
+                       dev_err(&intf->dev,
+                               "bundle %u cannot use control class\n",
+                               bundle_id);
+                       goto cleanup;
+               }
+
+               bundle = gb_bundle_create(intf, bundle_id, class);
+               if (!bundle)
+                       goto cleanup;
+
+               /*
+                * Now go set up this bundle's functions and cports.
+                *
+                * A 'bundle' represents a device in greybus. It may require
+                * multiple cports for its functioning. If we fail to set up
+                * any cport of a bundle, we should reject the whole bundle,
+                * as the device may then not be able to function properly.
+                *
+                * But failing to set up a cport of bundle X doesn't mean
+                * that the device corresponding to bundle Y will not work
+                * properly. Bundles should be treated as separate,
+                * independent devices.
+                *
+                * While parsing the manifest for an interface, treat bundles
+                * as separate entities and don't reject the entire interface
+                * and its bundles on failing to initialize a cport. But make
+                * sure the bundle that needs the cport gets destroyed
+                * properly.
+                */
+               if (!gb_manifest_parse_cports(bundle)) {
+                       gb_bundle_destroy(bundle);
+                       continue;
+               }
+
+               count++;
+       }
+
+       return count;
+cleanup:
+       /* An error occurred; undo any changes we've made */
+       list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
+               gb_bundle_destroy(bundle);
+               count--;
+       }
+       return 0;       /* Error; count should also be 0 */
+}
+
+static bool gb_manifest_parse_interface(struct gb_interface *intf,
+                                       struct manifest_desc *interface_desc)
+{
+       struct greybus_descriptor_interface *desc_intf = interface_desc->data;
+       struct gb_control *control = intf->control;
+       char *str;
+
+       /* Handle the strings first--they can fail */
+       str = gb_string_get(intf, desc_intf->vendor_stringid);
+       if (IS_ERR(str))
+               return false;
+       control->vendor_string = str;
+
+       str = gb_string_get(intf, desc_intf->product_stringid);
+       if (IS_ERR(str))
+               goto out_free_vendor_string;
+       control->product_string = str;
+
+       /* Assign feature flags communicated via manifest */
+       intf->features = desc_intf->features;
+
+       /* Release the interface descriptor, now that we're done with it */
+       release_manifest_descriptor(interface_desc);
+
+       /* An interface must have at least one bundle descriptor */
+       if (!gb_manifest_parse_bundles(intf)) {
+               dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
+               goto out_err;
+       }
+
+       return true;
+out_err:
+       kfree(control->product_string);
+       control->product_string = NULL;
+out_free_vendor_string:
+       kfree(control->vendor_string);
+       control->vendor_string = NULL;
+
+       return false;
+}
+
+/*
+ * Parse a buffer containing an interface manifest.
+ *
+ * If we find anything wrong with the content/format of the buffer
+ * we reject it.
+ *
+ * The first requirement is that the manifest's version is
+ * one we can parse.
+ *
+ * We make an initial pass through the buffer and identify all of
+ * the descriptors it contains, keeping track of each one's type and
+ * of the location and size of its data in the buffer.
+ *
+ * Next we scan the descriptors, looking for an interface descriptor;
+ * there must be exactly one of those.  When found, we record the
+ * information it contains, and then remove that descriptor (and any
+ * string descriptors it refers to) from further consideration.
+ *
+ * After that we look for the interface's bundles--there must be at
+ * least one of those.
+ *
+ * Returns true if parsing was successful, false otherwise.
+ */
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
+{
+       struct greybus_manifest *manifest;
+       struct greybus_manifest_header *header;
+       struct greybus_descriptor *desc;
+       struct manifest_desc *descriptor;
+       struct manifest_desc *interface_desc = NULL;
+       u16 manifest_size;
+       u32 found = 0;
+       bool result;
+
+       /* Manifest descriptor list should be empty here */
+       if (WARN_ON(!list_empty(&intf->manifest_descs)))
+               return false;
+
+       /* we have to have at _least_ the manifest header */
+       if (size < sizeof(*header)) {
+               dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
+                               size, sizeof(*header));
+               return false;
+       }
+
+       /* Make sure the size is right */
+       manifest = data;
+       header = &manifest->header;
+       manifest_size = le16_to_cpu(header->size);
+       if (manifest_size != size) {
+               dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
+                               size, manifest_size);
+               return false;
+       }
+
+       /* Validate major/minor number */
+       if (header->version_major > GREYBUS_VERSION_MAJOR) {
+               dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
+                               header->version_major, header->version_minor,
+                               GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+               return false;
+       }
+
+       /* OK, find all the descriptors */
+       desc = manifest->descriptors;
+       size -= sizeof(*header);
+       while (size) {
+               int desc_size;
+
+               desc_size = identify_descriptor(intf, desc, size);
+               if (desc_size < 0) {
+                       result = false;
+                       goto out;
+               }
+               desc = (struct greybus_descriptor *)((char *)desc + desc_size);
+               size -= desc_size;
+       }
+
+       /* There must be a single interface descriptor */
+       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+               if (descriptor->type == GREYBUS_TYPE_INTERFACE)
+                       if (!found++)
+                               interface_desc = descriptor;
+       }
+       if (found != 1) {
+               dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
+                               found);
+               result = false;
+               goto out;
+       }
+
+       /* Parse the manifest, starting with the interface descriptor */
+       result = gb_manifest_parse_interface(intf, interface_desc);
+
+       /*
+        * We really should have no remaining descriptors, but we
+        * don't know what newer format manifests might leave.
+        */
+       if (result && !list_empty(&intf->manifest_descs))
+               dev_info(&intf->dev, "excess descriptors in interface manifest\n");
+out:
+       release_manifest_descriptors(intf);
+
+       return result;
+}
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
new file mode 100644 (file)
index 0000000..d964284
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MANIFEST_H
+#define __MANIFEST_H
+
+struct gb_interface;
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
+
+#endif /* __MANIFEST_H */
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
new file mode 100644 (file)
index 0000000..69f67dd
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+
+static ssize_t eject_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t len)
+{
+       struct gb_module *module = to_gb_module(dev);
+       struct gb_interface *intf;
+       size_t i;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 0, &val);
+       if (ret)
+               return ret;
+
+       if (!val)
+               return len;
+
+       for (i = 0; i < module->num_interfaces; ++i) {
+               intf = module->interfaces[i];
+
+               mutex_lock(&intf->mutex);
+               /* Set flag to prevent concurrent activation. */
+               intf->ejected = true;
+               gb_interface_disable(intf);
+               gb_interface_deactivate(intf);
+               mutex_unlock(&intf->mutex);
+       }
+
+       /* Tell the SVC to eject the primary interface. */
+       ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
+       if (ret)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_WO(eject);
+
+static ssize_t module_id_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       return sprintf(buf, "%u\n", module->module_id);
+}
+static DEVICE_ATTR_RO(module_id);
+
+static ssize_t num_interfaces_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       return sprintf(buf, "%zu\n", module->num_interfaces);
+}
+static DEVICE_ATTR_RO(num_interfaces);
+
+static struct attribute *module_attrs[] = {
+       &dev_attr_eject.attr,
+       &dev_attr_module_id.attr,
+       &dev_attr_num_interfaces.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(module);
+
+static void gb_module_release(struct device *dev)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       trace_gb_module_release(module);
+
+       kfree(module);
+}
+
+struct device_type greybus_module_type = {
+       .name           = "greybus_module",
+       .release        = gb_module_release,
+};
+
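+/*
+ * A module's interfaces are numbered consecutively starting at the module
+ * id, so interface i below is created with id module_id + i and the
+ * primary interface shares the module id.
+ */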
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+                                       size_t num_interfaces)
+{
+       struct gb_interface *intf;
+       struct gb_module *module;
+       int i;
+
+       module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
+                               GFP_KERNEL);
+       if (!module)
+               return NULL;
+
+       module->hd = hd;
+       module->module_id = module_id;
+       module->num_interfaces = num_interfaces;
+
+       module->dev.parent = &hd->dev;
+       module->dev.bus = &greybus_bus_type;
+       module->dev.type = &greybus_module_type;
+       module->dev.groups = module_groups;
+       module->dev.dma_mask = hd->dev.dma_mask;
+       device_initialize(&module->dev);
+       dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
+
+       trace_gb_module_create(module);
+
+       for (i = 0; i < num_interfaces; ++i) {
+               intf = gb_interface_create(module, module_id + i);
+               if (!intf) {
+                       dev_err(&module->dev, "failed to create interface %u\n",
+                                       module_id + i);
+                       goto err_put_interfaces;
+               }
+               module->interfaces[i] = intf;
+       }
+
+       return module;
+
+err_put_interfaces:
+       for (--i; i >= 0; --i)
+               gb_interface_put(module->interfaces[i]);
+
+       put_device(&module->dev);
+
+       return NULL;
+}
+
+/*
+ * Register and enable an interface after first attempting to activate it.
+ */
+static void gb_module_register_interface(struct gb_interface *intf)
+{
+       struct gb_module *module = intf->module;
+       u8 intf_id = intf->interface_id;
+       int ret;
+
+       mutex_lock(&intf->mutex);
+
+       ret = gb_interface_activate(intf);
+       if (ret) {
+               if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
+                       dev_err(&module->dev,
+                                       "failed to activate interface %u: %d\n",
+                                       intf_id, ret);
+               }
+
+               gb_interface_add(intf);
+               goto err_unlock;
+       }
+
+       ret = gb_interface_add(intf);
+       if (ret)
+               goto err_interface_deactivate;
+
+       ret = gb_interface_enable(intf);
+       if (ret) {
+               dev_err(&module->dev, "failed to enable interface %u: %d\n",
+                               intf_id, ret);
+               goto err_interface_deactivate;
+       }
+
+       mutex_unlock(&intf->mutex);
+
+       return;
+
+err_interface_deactivate:
+       gb_interface_deactivate(intf);
+err_unlock:
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_module_deregister_interface(struct gb_interface *intf)
+{
+       /* Mark as disconnected to prevent I/O during disable. */
+       if (intf->module->disconnected)
+               intf->disconnected = true;
+
+       mutex_lock(&intf->mutex);
+       intf->removed = true;
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+
+       gb_interface_del(intf);
+}
+
+/* Register a module and its interfaces. */
+int gb_module_add(struct gb_module *module)
+{
+       size_t i;
+       int ret;
+
+       ret = device_add(&module->dev);
+       if (ret) {
+               dev_err(&module->dev, "failed to register module: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_module_add(module);
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_module_register_interface(module->interfaces[i]);
+
+       return 0;
+}
+
+/* Deregister a module and its interfaces. */
+void gb_module_del(struct gb_module *module)
+{
+       size_t i;
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_module_deregister_interface(module->interfaces[i]);
+
+       trace_gb_module_del(module);
+
+       device_del(&module->dev);
+}
+
+void gb_module_put(struct gb_module *module)
+{
+       size_t i;
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_interface_put(module->interfaces[i]);
+
+       put_device(&module->dev);
+}
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
new file mode 100644 (file)
index 0000000..88a97ce
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MODULE_H
+#define __MODULE_H
+
+struct gb_module {
+       struct device dev;
+       struct gb_host_device *hd;
+
+       struct list_head hd_node;
+
+       u8 module_id;
+       size_t num_interfaces;
+
+       bool disconnected;
+
+       struct gb_interface *interfaces[0];
+};
+#define to_gb_module(d) container_of(d, struct gb_module, dev)
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+                                  size_t num_interfaces);
+int gb_module_add(struct gb_module *module);
+void gb_module_del(struct gb_module *module);
+void gb_module_put(struct gb_module *module);
+
+#endif /* __MODULE_H */
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
new file mode 100644 (file)
index 0000000..0123109
--- /dev/null
@@ -0,0 +1,1239 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+static struct kmem_cache *gb_operation_cache;
+static struct kmem_cache *gb_message_cache;
+
+/* Workqueue to handle Greybus operation completions. */
+static struct workqueue_struct *gb_operation_completion_wq;
+
+/* Wait queue for synchronous cancellations. */
+static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
+
+/*
+ * Protects updates to operation->errno.
+ */
+static DEFINE_SPINLOCK(gb_operations_lock);
+
+static int gb_operation_response_send(struct gb_operation *operation,
+                                       int errno);
+
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       switch (connection->state) {
+       case GB_CONNECTION_STATE_ENABLED:
+               break;
+       case GB_CONNECTION_STATE_ENABLED_TX:
+               if (gb_operation_is_incoming(operation))
+                       goto err_unlock;
+               break;
+       case GB_CONNECTION_STATE_DISCONNECTING:
+               if (!gb_operation_is_core(operation))
+                       goto err_unlock;
+               break;
+       default:
+               goto err_unlock;
+       }
+
+       if (operation->active++ == 0)
+               list_add_tail(&operation->links, &connection->operations);
+
+       trace_gb_operation_get_active(operation);
+
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return 0;
+
+err_unlock:
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return -ENOTCONN;
+}
+
+/* Caller holds operation reference. */
+static void gb_operation_put_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+
+       trace_gb_operation_put_active(operation);
+
+       if (--operation->active == 0) {
+               list_del(&operation->links);
+               if (atomic_read(&operation->waiters))
+                       wake_up(&gb_operation_cancellation_queue);
+       }
+       spin_unlock_irqrestore(&connection->lock, flags);
+}
+
+static bool gb_operation_is_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       ret = operation->active;
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return ret;
+}
+
+/*
+ * Set an operation's result.
+ *
+ * Initially an outgoing operation's errno value is -EBADR.
+ * If no error occurs before sending the request message the only
+ * valid value operation->errno can be set to is -EINPROGRESS,
+ * indicating the request has been (or rather is about to be) sent.
+ * At that point nobody should be looking at the result until the
+ * response arrives.
+ *
+ * The first time the result gets set after the request has been
+ * sent, that result "sticks."  That is, if two concurrent threads
+ * race to set the result, the first one wins.  The return value
+ * tells the caller whether its result was recorded; if not the
+ * caller has nothing more to do.
+ *
+ * The result value -EILSEQ is reserved to signal an implementation
+ * error; if it's ever observed, the code performing the request has
+ * done something fundamentally wrong.  It is an error to try to set
+ * the result to -EBADR, and attempts to do so result in a warning,
+ * and -EILSEQ is used instead.  Similarly, the only valid result
+ * value to set for an operation in initial state is -EINPROGRESS.
+ * Attempts to do otherwise will also record a (successful) -EILSEQ
+ * operation result.
+ */
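+/*
+ * Illustrative result-state flow, summarizing the rules above:
+ *
+ *     -EBADR  --(request about to be sent)-->  -EINPROGRESS
+ *     -EINPROGRESS  --(first completion, cancellation or error)-->  final
+ *
+ * Setting -EINPROGRESS out of order, or attempting to set -EBADR, records
+ * -EILSEQ instead; any result set after the first final one is ignored.
+ */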
+static bool gb_operation_result_set(struct gb_operation *operation, int result)
+{
+       unsigned long flags;
+       int prev;
+
+       if (result == -EINPROGRESS) {
+               /*
+                * -EINPROGRESS is used to indicate the request is
+                * in flight.  It should be the first result value
+                * set after the initial -EBADR.  Issue a warning
+                * and record an implementation error if it's
+                * set at any other time.
+                */
+               spin_lock_irqsave(&gb_operations_lock, flags);
+               prev = operation->errno;
+               if (prev == -EBADR)
+                       operation->errno = result;
+               else
+                       operation->errno = -EILSEQ;
+               spin_unlock_irqrestore(&gb_operations_lock, flags);
+               WARN_ON(prev != -EBADR);
+
+               return true;
+       }
+
+       /*
+        * The first result value set after a request has been sent
+        * will be the final result of the operation.  Subsequent
+        * attempts to set the result are ignored.
+        *
+        * Note that -EBADR is a reserved "initial state" result
+        * value.  Attempts to set this value result in a warning,
+        * and the result code is set to -EILSEQ instead.
+        */
+       if (WARN_ON(result == -EBADR))
+               result = -EILSEQ; /* Nobody should be setting -EBADR */
+
+       spin_lock_irqsave(&gb_operations_lock, flags);
+       prev = operation->errno;
+       if (prev == -EINPROGRESS)
+               operation->errno = result;      /* First and final result */
+       spin_unlock_irqrestore(&gb_operations_lock, flags);
+
+       return prev == -EINPROGRESS;
+}
+
+int gb_operation_result(struct gb_operation *operation)
+{
+       int result = operation->errno;
+
+       WARN_ON(result == -EBADR);
+       WARN_ON(result == -EINPROGRESS);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(gb_operation_result);
+
+/*
+ * Looks up an outgoing operation on a connection and returns a refcounted
+ * pointer if found, or NULL otherwise.
+ */
+static struct gb_operation *
+gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
+{
+       struct gb_operation *operation;
+       unsigned long flags;
+       bool found = false;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       list_for_each_entry(operation, &connection->operations, links)
+               if (operation->id == operation_id &&
+                               !gb_operation_is_incoming(operation)) {
+                       gb_operation_get(operation);
+                       found = true;
+                       break;
+               }
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return found ? operation : NULL;
+}
+
+static int gb_message_send(struct gb_message *message, gfp_t gfp)
+{
+       struct gb_connection *connection = message->operation->connection;
+
+       trace_gb_message_send(message);
+       return connection->hd->driver->message_send(connection->hd,
+                                       connection->hd_cport_id,
+                                       message,
+                                       gfp);
+}
+
+/*
+ * Cancel a message we have passed to the host device layer to be sent.
+ */
+static void gb_message_cancel(struct gb_message *message)
+{
+       struct gb_host_device *hd = message->operation->connection->hd;
+
+       hd->driver->message_cancel(message);
+}
+
+static void gb_operation_request_handle(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       int status;
+       int ret;
+
+       if (connection->handler) {
+               status = connection->handler(operation);
+       } else {
+               dev_err(&connection->hd->dev,
+                       "%s: unexpected incoming request of type 0x%02x\n",
+                       connection->name, operation->type);
+
+               status = -EPROTONOSUPPORT;
+       }
+
+       ret = gb_operation_response_send(operation, status);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to send response %d for type 0x%02x: %d\n",
+                       connection->name, status, operation->type, ret);
+               return;
+       }
+}
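+
+/*
+ * A minimal sketch of what a connection->handler might look like; the
+ * protocol, structure and helper names below are illustrative only.  A
+ * handler that wants to return payload must allocate the response itself
+ * with gb_operation_response_alloc():
+ *
+ *     static int example_request_handler(struct gb_operation *op)
+ *     {
+ *             struct example_event_request *request;
+ *
+ *             if (op->request->payload_size < sizeof(*request))
+ *                     return -EINVAL;
+ *
+ *             request = op->request->payload;
+ *             return example_handle_event(request);
+ *     }
+ */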
+
+/*
+ * Process operation work.
+ *
+ * For incoming requests, call the protocol request handler. The operation
+ * result should be -EINPROGRESS at this point.
+ *
+ * For outgoing requests, the operation result value should have
+ * been set before queueing this.  The operation callback function
+ * allows the original requester to know the request has completed
+ * and its result is available.
+ */
+static void gb_operation_work(struct work_struct *work)
+{
+       struct gb_operation *operation;
+
+       operation = container_of(work, struct gb_operation, work);
+
+       if (gb_operation_is_incoming(operation))
+               gb_operation_request_handle(operation);
+       else
+               operation->callback(operation);
+
+       gb_operation_put_active(operation);
+       gb_operation_put(operation);
+}
+
+static void gb_operation_message_init(struct gb_host_device *hd,
+                               struct gb_message *message, u16 operation_id,
+                               size_t payload_size, u8 type)
+{
+       struct gb_operation_msg_hdr *header;
+
+       header = message->buffer;
+
+       message->header = header;
+       message->payload = payload_size ? header + 1 : NULL;
+       message->payload_size = payload_size;
+
+       /*
+        * The type supplied for incoming message buffers will be
+        * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
+        * arriving data so there's no need to initialize the message header.
+        */
+       if (type != GB_REQUEST_TYPE_INVALID) {
+               u16 message_size = (u16)(sizeof(*header) + payload_size);
+
+               /*
+                * For a request, the operation id gets filled in
+                * when the message is sent.  For a response, it
+                * will be copied from the request by the caller.
+                *
+                * The result field in a request message must be
+                * zero.  It will be set just prior to sending for
+                * a response.
+                */
+               header->size = cpu_to_le16(message_size);
+               header->operation_id = 0;
+               header->type = type;
+               header->result = 0;
+       }
+}
+
+/*
+ * Allocate a message to be used for an operation request or response.
+ * Both types of message contain a common header.  The request message
+ * for an outgoing operation is outbound, as is the response message
+ * for an incoming operation.  The message header for an outbound
+ * message is partially initialized here.
+ *
+ * The headers for inbound messages don't need to be initialized;
+ * they'll be filled in by arriving data.
+ *
+ * Our message buffers have the following layout:
+ *     message header  \_ these combined are
+ *     message payload /  the message size
+ */
+static struct gb_message *
+gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
+                               size_t payload_size, gfp_t gfp_flags)
+{
+       struct gb_message *message;
+       struct gb_operation_msg_hdr *header;
+       size_t message_size = payload_size + sizeof(*header);
+
+       if (message_size > hd->buffer_size_max) {
+               dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
+                               message_size, hd->buffer_size_max);
+               return NULL;
+       }
+
+       /* Allocate the message structure and buffer. */
+       message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
+       if (!message)
+               return NULL;
+
+       message->buffer = kzalloc(message_size, gfp_flags);
+       if (!message->buffer)
+               goto err_free_message;
+
+       /* Initialize the message.  Operation id is filled in later. */
+       gb_operation_message_init(hd, message, 0, payload_size, type);
+
+       return message;
+
+err_free_message:
+       kmem_cache_free(gb_message_cache, message);
+
+       return NULL;
+}
+
+static void gb_operation_message_free(struct gb_message *message)
+{
+       kfree(message->buffer);
+       kmem_cache_free(gb_message_cache, message);
+}
+
+/*
+ * Map an enum gb_operation_status value (which is represented in a
+ * message as a single byte) to an appropriate Linux negative errno.
+ */
+static int gb_operation_status_map(u8 status)
+{
+       switch (status) {
+       case GB_OP_SUCCESS:
+               return 0;
+       case GB_OP_INTERRUPTED:
+               return -EINTR;
+       case GB_OP_TIMEOUT:
+               return -ETIMEDOUT;
+       case GB_OP_NO_MEMORY:
+               return -ENOMEM;
+       case GB_OP_PROTOCOL_BAD:
+               return -EPROTONOSUPPORT;
+       case GB_OP_OVERFLOW:
+               return -EMSGSIZE;
+       case GB_OP_INVALID:
+               return -EINVAL;
+       case GB_OP_RETRY:
+               return -EAGAIN;
+       case GB_OP_NONEXISTENT:
+               return -ENODEV;
+       case GB_OP_MALFUNCTION:
+               return -EILSEQ;
+       case GB_OP_UNKNOWN_ERROR:
+       default:
+               return -EIO;
+       }
+}
+
+/*
+ * Map a Linux errno value (from operation->errno) into the value
+ * that should represent it in a response message status sent
+ * over the wire.  Returns an enum gb_operation_status value (which
+ * is represented in a message as a single byte).
+ */
+static u8 gb_operation_errno_map(int errno)
+{
+       switch (errno) {
+       case 0:
+               return GB_OP_SUCCESS;
+       case -EINTR:
+               return GB_OP_INTERRUPTED;
+       case -ETIMEDOUT:
+               return GB_OP_TIMEOUT;
+       case -ENOMEM:
+               return GB_OP_NO_MEMORY;
+       case -EPROTONOSUPPORT:
+               return GB_OP_PROTOCOL_BAD;
+       case -EMSGSIZE:
+               return GB_OP_OVERFLOW;  /* Could be underflow too */
+       case -EINVAL:
+               return GB_OP_INVALID;
+       case -EAGAIN:
+               return GB_OP_RETRY;
+       case -EILSEQ:
+               return GB_OP_MALFUNCTION;
+       case -ENODEV:
+               return GB_OP_NONEXISTENT;
+       case -EIO:
+       default:
+               return GB_OP_UNKNOWN_ERROR;
+       }
+}
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+                                       size_t response_size, gfp_t gfp)
+{
+       struct gb_host_device *hd = operation->connection->hd;
+       struct gb_operation_msg_hdr *request_header;
+       struct gb_message *response;
+       u8 type;
+
+       type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
+       response = gb_operation_message_alloc(hd, type, response_size, gfp);
+       if (!response)
+               return false;
+       response->operation = operation;
+
+       /*
+        * Size and type get initialized when the message is
+        * allocated.  The errno will be set before sending.  All
+        * that's left is the operation id, which we copy from the
+        * request message header (as-is, in little-endian order).
+        */
+       request_header = operation->request->header;
+       response->header->operation_id = request_header->operation_id;
+       operation->response = response;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
+
+/*
+ * Create a Greybus operation to be sent over the given connection.
+ * The request buffer will be big enough for a payload of the given
+ * size.
+ *
+ * For outgoing requests, the request message's header will be
+ * initialized with the type of the request and the message size.
+ * Outgoing operations must also specify the response buffer size,
+ * which must be sufficient to hold all expected response data.  The
+ * response message header will eventually be overwritten, so there's
+ * no need to initialize it here.
+ *
+ * Request messages for incoming operations can arrive in interrupt
+ * context, so they must be allocated with GFP_ATOMIC.  In this case
+ * the request buffer will be immediately overwritten, so there is
+ * no need to initialize the message header.  Responsibility for
+ * allocating a response buffer lies with the incoming request
+ * handler for a protocol, so we don't allocate one here.
+ *
+ * Returns a pointer to the new operation or a null pointer if an
+ * error occurs.
+ */
+static struct gb_operation *
+gb_operation_create_common(struct gb_connection *connection, u8 type,
+                               size_t request_size, size_t response_size,
+                               unsigned long op_flags, gfp_t gfp_flags)
+{
+       struct gb_host_device *hd = connection->hd;
+       struct gb_operation *operation;
+
+       operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
+       if (!operation)
+               return NULL;
+       operation->connection = connection;
+
+       operation->request = gb_operation_message_alloc(hd, type, request_size,
+                                                       gfp_flags);
+       if (!operation->request)
+               goto err_cache;
+       operation->request->operation = operation;
+
+       /* Allocate the response buffer for outgoing operations */
+       if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
+               if (!gb_operation_response_alloc(operation, response_size,
+                                                gfp_flags)) {
+                       goto err_request;
+               }
+       }
+
+       operation->flags = op_flags;
+       operation->type = type;
+       operation->errno = -EBADR;  /* Initial value--means "never set" */
+
+       INIT_WORK(&operation->work, gb_operation_work);
+       init_completion(&operation->completion);
+       kref_init(&operation->kref);
+       atomic_set(&operation->waiters, 0);
+
+       return operation;
+
+err_request:
+       gb_operation_message_free(operation->request);
+err_cache:
+       kmem_cache_free(gb_operation_cache, operation);
+
+       return NULL;
+}
+
+/*
+ * Create a new operation associated with the given connection.  The
+ * request and response sizes provided are the number of bytes
+ * required to hold the request/response payload only.  Both of
+ * these are allowed to be 0.  Note that 0x00 is reserved as an
+ * invalid operation type for all protocols, and this is enforced
+ * here.
+ */
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, unsigned long flags,
+                               gfp_t gfp)
+{
+       struct gb_operation *operation;
+
+       if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
+               return NULL;
+       if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
+               type &= ~GB_MESSAGE_TYPE_RESPONSE;
+
+       if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
+               flags &= GB_OPERATION_FLAG_USER_MASK;
+
+       operation = gb_operation_create_common(connection, type,
+                                               request_size, response_size,
+                                               flags, gfp);
+       if (operation)
+               trace_gb_operation_create(operation);
+
+       return operation;
+}
+EXPORT_SYMBOL_GPL(gb_operation_create_flags);
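+
+/*
+ * Purely illustrative call for a protocol that tolerates short responses
+ * (the type, sizes and variable names are made up):
+ *
+ *     op = gb_operation_create_flags(connection, EXAMPLE_TYPE_DUMP,
+ *                                    sizeof(struct example_dump_request),
+ *                                    EXAMPLE_DUMP_SIZE_MAX,
+ *                                    GB_OPERATION_FLAG_SHORT_RESPONSE,
+ *                                    GFP_KERNEL);
+ */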
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, unsigned long flags,
+                               gfp_t gfp)
+{
+       struct gb_operation *operation;
+
+       flags |= GB_OPERATION_FLAG_CORE;
+
+       operation = gb_operation_create_common(connection, type,
+                                               request_size, response_size,
+                                               flags, gfp);
+       if (operation)
+               trace_gb_operation_create_core(operation);
+
+       return operation;
+}
+/* Do not export this function. */
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+
+       return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
+
+static struct gb_operation *
+gb_operation_create_incoming(struct gb_connection *connection, u16 id,
+                               u8 type, void *data, size_t size)
+{
+       struct gb_operation *operation;
+       size_t request_size;
+       unsigned long flags = GB_OPERATION_FLAG_INCOMING;
+
+       /* Caller has made sure we at least have a message header. */
+       request_size = size - sizeof(struct gb_operation_msg_hdr);
+
+       if (!id)
+               flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
+
+       operation = gb_operation_create_common(connection, type,
+                                               request_size,
+                                               GB_REQUEST_TYPE_INVALID,
+                                               flags, GFP_ATOMIC);
+       if (!operation)
+               return NULL;
+
+       operation->id = id;
+       memcpy(operation->request->header, data, size);
+       trace_gb_operation_create_incoming(operation);
+
+       return operation;
+}
+
+/*
+ * Get an additional reference on an operation.
+ */
+void gb_operation_get(struct gb_operation *operation)
+{
+       kref_get(&operation->kref);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get);
+
+/*
+ * Destroy a previously created operation.
+ */
+static void _gb_operation_destroy(struct kref *kref)
+{
+       struct gb_operation *operation;
+
+       operation = container_of(kref, struct gb_operation, kref);
+
+       trace_gb_operation_destroy(operation);
+
+       if (operation->response)
+               gb_operation_message_free(operation->response);
+       gb_operation_message_free(operation->request);
+
+       kmem_cache_free(gb_operation_cache, operation);
+}
+
+/*
+ * Drop a reference on an operation, and destroy it when the last
+ * one is gone.
+ */
+void gb_operation_put(struct gb_operation *operation)
+{
+       if (WARN_ON(!operation))
+               return;
+
+       kref_put(&operation->kref, _gb_operation_destroy);
+}
+EXPORT_SYMBOL_GPL(gb_operation_put);
+
+/* Tell the requester we're done */
+static void gb_operation_sync_callback(struct gb_operation *operation)
+{
+       complete(&operation->completion);
+}
+
+/**
+ * gb_operation_request_send() - send an operation request message
+ * @operation: the operation to initiate
+ * @callback:  the operation completion callback
+ * @gfp:       the memory flags to use for any allocations
+ *
+ * The caller has filled in any payload so the request message is ready to go.
+ * The callback function supplied will be called when the response message has
+ * arrived, a unidirectional request has been sent, or the operation is
+ * cancelled, indicating that the operation is complete. The callback function
+ * can fetch the result of the operation using gb_operation_result() if
+ * desired.
+ *
+ * Return: 0 if the request was successfully queued in the host-driver queues,
+ * or a negative errno.
+ */
+int gb_operation_request_send(struct gb_operation *operation,
+                               gb_operation_callback callback,
+                               gfp_t gfp)
+{
+       struct gb_connection *connection = operation->connection;
+       struct gb_operation_msg_hdr *header;
+       unsigned int cycle;
+       int ret;
+
+       if (gb_connection_is_offloaded(connection))
+               return -EBUSY;
+
+       if (!callback)
+               return -EINVAL;
+
+       /*
+        * Record the callback function, which is executed in
+        * non-atomic (workqueue) context when the final result
+        * of an operation has been set.
+        */
+       operation->callback = callback;
+
+       /*
+        * Assign the operation's id, and store it in the request header.
+        * Zero is a reserved operation id for unidirectional operations.
+        */
+       if (gb_operation_is_unidirectional(operation)) {
+               operation->id = 0;
+       } else {
+               cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
+               operation->id = (u16)(cycle % U16_MAX + 1);
+       }
+
+       header = operation->request->header;
+       header->operation_id = cpu_to_le16(operation->id);
+
+       gb_operation_result_set(operation, -EINPROGRESS);
+
+       /*
+        * Get an extra reference on the operation. It'll be dropped when the
+        * operation completes.
+        */
+       gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
+       if (ret)
+               goto err_put;
+
+       ret = gb_message_send(operation->request, gfp);
+       if (ret)
+               goto err_put_active;
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send);
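+
+/*
+ * A minimal, illustrative use of the asynchronous interface; the type,
+ * callback and helper names are assumptions, and dropping the creator's
+ * reference in the callback is just one possible pattern:
+ *
+ *     static void example_callback(struct gb_operation *op)
+ *     {
+ *             if (!gb_operation_result(op))
+ *                     example_consume(op->response->payload,
+ *                                     op->response->payload_size);
+ *             gb_operation_put(op);
+ *     }
+ *
+ *     op = gb_operation_create(connection, EXAMPLE_TYPE_GET,
+ *                              request_size, response_size, GFP_KERNEL);
+ *     ret = gb_operation_request_send(op, example_callback, GFP_KERNEL);
+ *     if (ret)
+ *             gb_operation_put(op);
+ */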
+
+/*
+ * Send a synchronous operation.  This function is expected to
+ * block, returning only when the response has arrived (or when an
+ * error is detected).  The return value is the result of the
+ * operation.
+ */
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+                                               unsigned int timeout)
+{
+       int ret;
+       unsigned long timeout_jiffies;
+
+       ret = gb_operation_request_send(operation, gb_operation_sync_callback,
+                                       GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       if (timeout)
+               timeout_jiffies = msecs_to_jiffies(timeout);
+       else
+               timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+       ret = wait_for_completion_interruptible_timeout(&operation->completion,
+                                                       timeout_jiffies);
+       if (ret < 0) {
+               /* Cancel the operation if interrupted */
+               gb_operation_cancel(operation, -ECANCELED);
+       } else if (ret == 0) {
+               /* Cancel the operation if op timed out */
+               gb_operation_cancel(operation, -ETIMEDOUT);
+       }
+
+       return gb_operation_result(operation);
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
+
+/*
+ * Send a response for an incoming operation request.  A non-zero
+ * errno indicates a failed operation.
+ *
+ * If there is any response payload, the incoming request handler is
+ * responsible for allocating the response message.  Otherwise it
+ * can simply supply the result errno; this function will
+ * allocate the response message if necessary.
+ */
+static int gb_operation_response_send(struct gb_operation *operation,
+                                       int errno)
+{
+       struct gb_connection *connection = operation->connection;
+       int ret;
+
+       if (!operation->response &&
+                       !gb_operation_is_unidirectional(operation)) {
+               if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
+                       return -ENOMEM;
+       }
+
+       /* Record the result */
+       if (!gb_operation_result_set(operation, errno)) {
+               dev_err(&connection->hd->dev, "request result already set\n");
+               return -EIO;    /* Shouldn't happen */
+       }
+
+       /* Sender of request does not care about response. */
+       if (gb_operation_is_unidirectional(operation))
+               return 0;
+
+       /* Reference will be dropped when message has been sent. */
+       gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
+       if (ret)
+               goto err_put;
+
+       /* Fill in the response header and send it */
+       operation->response->header->result = gb_operation_errno_map(errno);
+
+       ret = gb_message_send(operation->response, GFP_KERNEL);
+       if (ret)
+               goto err_put_active;
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+/*
+ * This function is called when a message send request has completed.
+ */
+void greybus_message_sent(struct gb_host_device *hd,
+                                       struct gb_message *message, int status)
+{
+       struct gb_operation *operation = message->operation;
+       struct gb_connection *connection = operation->connection;
+
+       /*
+        * If the message was a response, we just need to drop our
+        * reference to the operation.  If an error occurred, report
+        * it.
+        *
+        * For requests, if there's no error and the operation is not
+        * unidirectional, there's nothing more to do until the response
+        * arrives. If an error occurred attempting to send it, or if the
+        * operation is unidirectional, record the result of the operation and
+        * schedule its completion.
+        */
+       if (message == operation->response) {
+               if (status) {
+                       dev_err(&connection->hd->dev,
+                               "%s: error sending response 0x%02x: %d\n",
+                               connection->name, operation->type, status);
+               }
+
+               gb_operation_put_active(operation);
+               gb_operation_put(operation);
+       } else if (status || gb_operation_is_unidirectional(operation)) {
+               if (gb_operation_result_set(operation, status)) {
+                       queue_work(gb_operation_completion_wq,
+                                       &operation->work);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(greybus_message_sent);
+
+/*
+ * We've received data on a connection, and it doesn't look like a
+ * response, so we assume it's a request.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the request buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_request(struct gb_connection *connection,
+                               const struct gb_operation_msg_hdr *header,
+                               void *data, size_t size)
+{
+       struct gb_operation *operation;
+       u16 operation_id;
+       u8 type;
+       int ret;
+
+       operation_id = le16_to_cpu(header->operation_id);
+       type = header->type;
+
+       operation = gb_operation_create_incoming(connection, operation_id,
+                                               type, data, size);
+       if (!operation) {
+               dev_err(&connection->hd->dev,
+                       "%s: can't create incoming operation\n",
+                       connection->name);
+               return;
+       }
+
+       ret = gb_operation_get_active(operation);
+       if (ret) {
+               gb_operation_put(operation);
+               return;
+       }
+       trace_gb_message_recv_request(operation->request);
+
+       /*
+        * The initial reference to the operation will be dropped when the
+        * request handler returns.
+        */
+       if (gb_operation_result_set(operation, -EINPROGRESS))
+               queue_work(connection->wq, &operation->work);
+}
+
+/*
+ * We've received data that appears to be an operation response
+ * message.  Look up the operation, and record that we've received
+ * its response.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the response buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_response(struct gb_connection *connection,
+                               const struct gb_operation_msg_hdr *header,
+                               void *data, size_t size)
+{
+       struct gb_operation *operation;
+       struct gb_message *message;
+       size_t message_size;
+       u16 operation_id;
+       int errno;
+
+       operation_id = le16_to_cpu(header->operation_id);
+
+       if (!operation_id) {
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: invalid response id 0 received\n",
+                               connection->name);
+               return;
+       }
+
+       operation = gb_operation_find_outgoing(connection, operation_id);
+       if (!operation) {
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: unexpected response id 0x%04x received\n",
+                               connection->name, operation_id);
+               return;
+       }
+
+       errno = gb_operation_status_map(header->result);
+       message = operation->response;
+       message_size = sizeof(*header) + message->payload_size;
+       if (!errno && size > message_size) {
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: malformed response 0x%02x received (%zu > %zu)\n",
+                               connection->name, header->type,
+                               size, message_size);
+               errno = -EMSGSIZE;
+       } else if (!errno && size < message_size) {
+               if (gb_operation_short_response_allowed(operation)) {
+                       message->payload_size = size - sizeof(*header);
+               } else {
+                       dev_err_ratelimited(&connection->hd->dev,
+                                       "%s: short response 0x%02x received (%zu < %zu)\n",
+                                       connection->name, header->type,
+                                       size, message_size);
+                       errno = -EMSGSIZE;
+               }
+       }
+
+       /* We must ignore the payload if a bad status is returned */
+       if (errno)
+               size = sizeof(*header);
+
+       /* The rest will be handled in work queue context */
+       if (gb_operation_result_set(operation, errno)) {
+               memcpy(message->buffer, data, size);
+
+               trace_gb_message_recv_response(message);
+
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+
+       gb_operation_put(operation);
+}
+
+/*
+ * Handle data arriving on a connection.  As soon as we return, the
+ * supplied data buffer will be reused (so unless we do something
+ * with it, it's effectively dropped).
+ */
+void gb_connection_recv(struct gb_connection *connection,
+                               void *data, size_t size)
+{
+       struct gb_operation_msg_hdr header;
+       struct device *dev = &connection->hd->dev;
+       size_t msg_size;
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED ||
+                       gb_connection_is_offloaded(connection)) {
+               dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
+                               connection->name, size);
+               return;
+       }
+
+       if (size < sizeof(header)) {
+               dev_err_ratelimited(dev, "%s: short message received\n",
+                               connection->name);
+               return;
+       }
+
+       /* Use memcpy as data may be unaligned */
+       memcpy(&header, data, sizeof(header));
+       msg_size = le16_to_cpu(header.size);
+       if (size < msg_size) {
+               dev_err_ratelimited(dev,
+                               "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
+                               connection->name,
+                               le16_to_cpu(header.operation_id),
+                               header.type, size, msg_size);
+               return;         /* XXX Should still complete operation */
+       }
+
+       if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
+               gb_connection_recv_response(connection, &header, data,
+                                               msg_size);
+       } else {
+               gb_connection_recv_request(connection, &header, data,
+                                               msg_size);
+       }
+}
+
+/*
+ * Cancel an outgoing operation synchronously, and record the given error to
+ * indicate why.
+ */
+void gb_operation_cancel(struct gb_operation *operation, int errno)
+{
+       if (WARN_ON(gb_operation_is_incoming(operation)))
+               return;
+
+       if (gb_operation_result_set(operation, errno)) {
+               gb_message_cancel(operation->request);
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+       trace_gb_message_cancel_outgoing(operation->request);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                       !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
+}
+EXPORT_SYMBOL_GPL(gb_operation_cancel);
+
+/*
+ * Cancel an incoming operation synchronously. Called during connection tear
+ * down.
+ */
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
+{
+       if (WARN_ON(!gb_operation_is_incoming(operation)))
+               return;
+
+       if (!gb_operation_is_unidirectional(operation)) {
+               /*
+                * Make sure the request handler has submitted the response
+                * before cancelling it.
+                */
+               flush_work(&operation->work);
+               if (!gb_operation_result_set(operation, errno))
+                       gb_message_cancel(operation->response);
+       }
+       trace_gb_message_cancel_incoming(operation->response);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                       !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
+}
+
+/**
+ * gb_operation_sync_timeout() - implement a "simple" synchronous operation
+ * @connection: the Greybus connection to send this to
+ * @type: the type of operation to send
+ * @request: pointer to a memory buffer to copy the request from
+ * @request_size: size of @request
+ * @response: pointer to a memory buffer to copy the response to
+ * @response_size: the size of @response.
+ * @timeout: operation timeout in milliseconds
+ *
+ * This function implements a simple synchronous Greybus operation.  It sends
+ * the provided operation request and waits (sleeps) until the corresponding
+ * operation response message has been successfully received, or an error
+ * occurs.  @request and @response are buffers to hold the request and response
+ * data respectively, and if they are not NULL, their size must be specified in
+ * @request_size and @response_size.
+ *
+ * If a response payload is to come back, and @response is not NULL,
+ * @response_size bytes will be copied into @response if the operation
+ * is successful.
+ *
+ * If there is an error, the response buffer is left alone.
+ */
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+                               void *request, int request_size,
+                               void *response, int response_size,
+                               unsigned int timeout)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       if ((response_size && !response) ||
+           (request_size && !request))
+               return -EINVAL;
+
+       operation = gb_operation_create(connection, type,
+                                       request_size, response_size,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
+                       connection->name, operation->id, type, ret);
+       } else {
+               if (response_size) {
+                       memcpy(response, operation->response->payload,
+                              response_size);
+               }
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
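+
+/*
+ * For illustration only; the request/response structures and operation
+ * type are hypothetical:
+ *
+ *     struct example_request req = { .id = cpu_to_le32(1) };
+ *     struct example_response rsp;
+ *     int ret;
+ *
+ *     ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_GET,
+ *                                     &req, sizeof(req),
+ *                                     &rsp, sizeof(rsp),
+ *                                     GB_OPERATION_TIMEOUT_DEFAULT);
+ */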
+
+/**
+ * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
+ * @connection:                connection to use
+ * @type:              type of operation to send
+ * @request:           memory buffer to copy the request from
+ * @request_size:      size of @request
+ * @timeout:           send timeout in milliseconds
+ *
+ * Initiate a unidirectional operation by sending a request message and
+ * waiting for it to be acknowledged as sent by the host device.
+ *
+ * Note that a successful send of a unidirectional operation does not imply
+ * that the request has actually reached the remote end of the connection.
+ */
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+                               int type, void *request, int request_size,
+                               unsigned int timeout)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       if (request_size && !request)
+               return -EINVAL;
+
+       operation = gb_operation_create_flags(connection, type,
+                                       request_size, 0,
+                                       GB_OPERATION_FLAG_UNIDIRECTIONAL,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: unidirectional operation of type 0x%02x failed: %d\n",
+                       connection->name, type, ret);
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
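+
+/*
+ * Sketch of a fire-and-forget notification (type and payload are
+ * illustrative); a zero return only means the host device reported the
+ * message as sent:
+ *
+ *     ret = gb_operation_unidirectional_timeout(connection,
+ *                                     EXAMPLE_TYPE_NOTIFY,
+ *                                     &event, sizeof(event),
+ *                                     GB_OPERATION_TIMEOUT_DEFAULT);
+ */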
+
+int __init gb_operation_init(void)
+{
+       gb_message_cache = kmem_cache_create("gb_message_cache",
+                               sizeof(struct gb_message), 0, 0, NULL);
+       if (!gb_message_cache)
+               return -ENOMEM;
+
+       gb_operation_cache = kmem_cache_create("gb_operation_cache",
+                               sizeof(struct gb_operation), 0, 0, NULL);
+       if (!gb_operation_cache)
+               goto err_destroy_message_cache;
+
+       gb_operation_completion_wq = alloc_workqueue("greybus_completion",
+                               0, 0);
+       if (!gb_operation_completion_wq)
+               goto err_destroy_operation_cache;
+
+       return 0;
+
+err_destroy_operation_cache:
+       kmem_cache_destroy(gb_operation_cache);
+       gb_operation_cache = NULL;
+err_destroy_message_cache:
+       kmem_cache_destroy(gb_message_cache);
+       gb_message_cache = NULL;
+
+       return -ENOMEM;
+}
+
+void gb_operation_exit(void)
+{
+       destroy_workqueue(gb_operation_completion_wq);
+       gb_operation_completion_wq = NULL;
+       kmem_cache_destroy(gb_operation_cache);
+       gb_operation_cache = NULL;
+       kmem_cache_destroy(gb_message_cache);
+       gb_message_cache = NULL;
+}
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
new file mode 100644 (file)
index 0000000..de09a2c
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __OPERATION_H
+#define __OPERATION_H
+
+#include <linux/completion.h>
+
+struct gb_operation;
+
+/* The default amount of time a request is given to complete */
+#define GB_OPERATION_TIMEOUT_DEFAULT   1000    /* milliseconds */
+
+/*
+ * The top bit of the type in an operation message header indicates
+ * whether the message is a request (bit clear) or response (bit set)
+ */
+#define GB_MESSAGE_TYPE_RESPONSE       ((u8)0x80)
+
+enum gb_operation_result {
+       GB_OP_SUCCESS           = 0x00,
+       GB_OP_INTERRUPTED       = 0x01,
+       GB_OP_TIMEOUT           = 0x02,
+       GB_OP_NO_MEMORY         = 0x03,
+       GB_OP_PROTOCOL_BAD      = 0x04,
+       GB_OP_OVERFLOW          = 0x05,
+       GB_OP_INVALID           = 0x06,
+       GB_OP_RETRY             = 0x07,
+       GB_OP_NONEXISTENT       = 0x08,
+       GB_OP_UNKNOWN_ERROR     = 0xfe,
+       GB_OP_MALFUNCTION       = 0xff,
+};
+
+#define GB_OPERATION_MESSAGE_SIZE_MIN  sizeof(struct gb_operation_msg_hdr)
+#define GB_OPERATION_MESSAGE_SIZE_MAX  U16_MAX
+
+/*
+ * Protocol code should only examine the payload and payload_size fields, and
+ * host-controller drivers may use the hcpriv field. All other fields are
+ * intended to be private to the operations core code.
+ */
+struct gb_message {
+       struct gb_operation             *operation;
+       struct gb_operation_msg_hdr     *header;
+
+       void                            *payload;
+       size_t                          payload_size;
+
+       void                            *buffer;
+
+       void                            *hcpriv;
+};
+
+#define GB_OPERATION_FLAG_INCOMING             BIT(0)
+#define GB_OPERATION_FLAG_UNIDIRECTIONAL       BIT(1)
+#define GB_OPERATION_FLAG_SHORT_RESPONSE       BIT(2)
+#define GB_OPERATION_FLAG_CORE                 BIT(3)
+
+#define GB_OPERATION_FLAG_USER_MASK    (GB_OPERATION_FLAG_SHORT_RESPONSE | \
+                                        GB_OPERATION_FLAG_UNIDIRECTIONAL)
+
+/*
+ * A Greybus operation is a remote procedure call performed over a
+ * connection between two UniPro interfaces.
+ *
+ * Every operation consists of a request message sent to the other
+ * end of the connection coupled with a reply message returned to
+ * the sender.  Every operation has a type, whose interpretation is
+ * dependent on the protocol associated with the connection.
+ *
+ * Only four things in an operation structure are intended to be
+ * directly usable by protocol handlers:  the operation's connection
+ * pointer; the operation type; the request message payload (and
+ * size); and the response message payload (and size).  Note that a
+ * message with a 0-byte payload has a null message payload pointer.
+ *
+ * In addition, every operation has a result, which is an errno
+ * value.  Protocol handlers access the operation result using
+ * gb_operation_result().
+ */
+typedef void (*gb_operation_callback)(struct gb_operation *);
+struct gb_operation {
+       struct gb_connection    *connection;
+       struct gb_message       *request;
+       struct gb_message       *response;
+
+       unsigned long           flags;
+       u8                      type;
+       u16                     id;
+       int                     errno;          /* Operation result */
+
+       struct work_struct      work;
+       gb_operation_callback   callback;
+       struct completion       completion;
+
+       struct kref             kref;
+       atomic_t                waiters;
+
+       int                     active;
+       struct list_head        links;          /* connection->operations */
+};
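+
+/*
+ * Illustrative access from protocol code, limited to the fields described
+ * as public above (structure names are hypothetical):
+ *
+ *     struct example_response *rsp = operation->response->payload;
+ *     size_t len = operation->response->payload_size;
+ *     int result = gb_operation_result(operation);
+ */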
+
+static inline bool
+gb_operation_is_incoming(struct gb_operation *operation)
+{
+       return operation->flags & GB_OPERATION_FLAG_INCOMING;
+}
+
+static inline bool
+gb_operation_is_unidirectional(struct gb_operation *operation)
+{
+       return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
+}
+
+static inline bool
+gb_operation_short_response_allowed(struct gb_operation *operation)
+{
+       return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
+}
+
+static inline bool gb_operation_is_core(struct gb_operation *operation)
+{
+       return operation->flags & GB_OPERATION_FLAG_CORE;
+}
+
+void gb_connection_recv(struct gb_connection *connection,
+                                       void *data, size_t size);
+
+int gb_operation_result(struct gb_operation *operation);
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, unsigned long flags,
+                               gfp_t gfp);
+
+static inline struct gb_operation *
+gb_operation_create(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, gfp_t gfp)
+{
+       return gb_operation_create_flags(connection, type, request_size,
+                                               response_size, 0, gfp);
+}
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, unsigned long flags,
+                               gfp_t gfp);
+
+void gb_operation_get(struct gb_operation *operation);
+void gb_operation_put(struct gb_operation *operation);
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+                                       size_t response_size, gfp_t gfp);
+
+int gb_operation_request_send(struct gb_operation *operation,
+                               gb_operation_callback callback,
+                               gfp_t gfp);
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+                                               unsigned int timeout);
+static inline int
+gb_operation_request_send_sync(struct gb_operation *operation)
+{
+       return gb_operation_request_send_sync_timeout(operation,
+                       GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+void gb_operation_cancel(struct gb_operation *operation, int errno);
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
+
+void greybus_message_sent(struct gb_host_device *hd,
+                               struct gb_message *message, int status);
+
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+                               void *request, int request_size,
+                               void *response, int response_size,
+                               unsigned int timeout);
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+                               int type, void *request, int request_size,
+                               unsigned int timeout);
+
+static inline int gb_operation_sync(struct gb_connection *connection, int type,
+                     void *request, int request_size,
+                     void *response, int response_size)
+{
+       return gb_operation_sync_timeout(connection, type,
+                       request, request_size, response, response_size,
+                       GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+static inline int gb_operation_unidirectional(struct gb_connection *connection,
+                               int type, void *request, int request_size)
+{
+       return gb_operation_unidirectional_timeout(connection, type,
+                       request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+int gb_operation_init(void);
+void gb_operation_exit(void);
+
+#endif /* !__OPERATION_H */
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
new file mode 100644 (file)
index 0000000..68dd3d2
--- /dev/null
@@ -0,0 +1,1161 @@
+/*
+ * Power Supply driver for a Greybus module.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+
+#define PROP_MAX 32
+
+struct gb_power_supply_prop {
+       enum power_supply_property      prop;
+       u8                              gb_prop;
+       int                             val;
+       int                             previous_val;
+       bool                            is_writeable;
+};
+
+struct gb_power_supply {
+       u8                              id;
+       bool                            registered;
+#ifndef CORE_OWNS_PSY_STRUCT
+       struct power_supply             psy;
+#define to_gb_power_supply(x) container_of(x, struct gb_power_supply, psy)
+#else
+       struct power_supply             *psy;
+       struct power_supply_desc        desc;
+#define to_gb_power_supply(x) power_supply_get_drvdata(x)
+#endif
+       char                            name[64];
+       struct gb_power_supplies        *supplies;
+       struct delayed_work             work;
+       char                            *manufacturer;
+       char                            *model_name;
+       char                            *serial_number;
+       u8                              type;
+       u8                              properties_count;
+       u8                              properties_count_str;
+       unsigned long                   last_update;
+       u8                              cache_invalid;
+       unsigned int                    update_interval;
+       bool                            changed;
+       struct gb_power_supply_prop     *props;
+       enum power_supply_property      *props_raw;
+       bool                            pm_acquired;
+       struct mutex                    supply_lock;
+};
+
+struct gb_power_supplies {
+       struct gb_connection    *connection;
+       u8                      supplies_count;
+       struct gb_power_supply  *supply;
+       struct mutex            supplies_lock;
+};
+
+/* cache time in milliseconds; if cache_time is set to 0 the cache is disabled */
+static unsigned int cache_time = 1000;
+/*
+ * initial and maximum update interval; the interval backs off
+ * exponentially between the two
+ */
+static unsigned int update_interval_init = 1 * HZ;
+static unsigned int update_interval_max = 30 * HZ;
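+
+/*
+ * With the defaults above, and assuming the interval starts at
+ * update_interval_init and is doubled by next_interval() below, the
+ * polling delay grows roughly as 1s -> 2s -> 4s -> 8s -> 16s -> 30s
+ * (clamped at update_interval_max).
+ */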
+
+struct gb_power_supply_changes {
+       enum power_supply_property      prop;
+       u32                             tolerance_change;
+       void (*prop_changed)(struct gb_power_supply *gbpsy,
+                            struct gb_power_supply_prop *prop);
+};
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+                                        struct gb_power_supply_prop *prop);
+
+static const struct gb_power_supply_changes psy_props_changes[] = {
+       {       .prop                   = GB_POWER_SUPPLY_PROP_STATUS,
+               .tolerance_change       = 0,
+               .prop_changed           = gb_power_supply_state_change,
+       },
+       {       .prop                   = GB_POWER_SUPPLY_PROP_TEMP,
+               .tolerance_change       = 500,
+               .prop_changed           = NULL,
+       },
+       {       .prop                   = GB_POWER_SUPPLY_PROP_ONLINE,
+               .tolerance_change       = 0,
+               .prop_changed           = NULL,
+       },
+};
+
+static int get_psp_from_gb_prop(int gb_prop, enum power_supply_property *psp)
+{
+       int prop;
+
+       switch (gb_prop) {
+       case GB_POWER_SUPPLY_PROP_STATUS:
+               prop = POWER_SUPPLY_PROP_STATUS;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_TYPE:
+               prop = POWER_SUPPLY_PROP_CHARGE_TYPE;
+               break;
+       case GB_POWER_SUPPLY_PROP_HEALTH:
+               prop = POWER_SUPPLY_PROP_HEALTH;
+               break;
+       case GB_POWER_SUPPLY_PROP_PRESENT:
+               prop = POWER_SUPPLY_PROP_PRESENT;
+               break;
+       case GB_POWER_SUPPLY_PROP_ONLINE:
+               prop = POWER_SUPPLY_PROP_ONLINE;
+               break;
+       case GB_POWER_SUPPLY_PROP_AUTHENTIC:
+               prop = POWER_SUPPLY_PROP_AUTHENTIC;
+               break;
+       case GB_POWER_SUPPLY_PROP_TECHNOLOGY:
+               prop = POWER_SUPPLY_PROP_TECHNOLOGY;
+               break;
+       case GB_POWER_SUPPLY_PROP_CYCLE_COUNT:
+               prop = POWER_SUPPLY_PROP_CYCLE_COUNT;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_AVG:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_OCV:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_OCV;
+               break;
+       case GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+               prop = POWER_SUPPLY_PROP_VOLTAGE_BOOT;
+               break;
+       case GB_POWER_SUPPLY_PROP_CURRENT_MAX:
+               prop = POWER_SUPPLY_PROP_CURRENT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_CURRENT_NOW:
+               prop = POWER_SUPPLY_PROP_CURRENT_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_CURRENT_AVG:
+               prop = POWER_SUPPLY_PROP_CURRENT_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_CURRENT_BOOT:
+               prop = POWER_SUPPLY_PROP_CURRENT_BOOT;
+               break;
+       case GB_POWER_SUPPLY_PROP_POWER_NOW:
+               prop = POWER_SUPPLY_PROP_POWER_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_POWER_AVG:
+               prop = POWER_SUPPLY_PROP_POWER_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+               prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+               prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_FULL:
+               prop = POWER_SUPPLY_PROP_CHARGE_FULL;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY:
+               prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_NOW:
+               prop = POWER_SUPPLY_PROP_CHARGE_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_AVG:
+               prop = POWER_SUPPLY_PROP_CHARGE_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_COUNTER:
+               prop = POWER_SUPPLY_PROP_CHARGE_COUNTER;
+               break;
+       case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT;
+               break;
+       case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE;
+               break;
+       case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+               prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+               prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               prop = POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+               prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
+               prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_FULL:
+               prop = POWER_SUPPLY_PROP_ENERGY_FULL;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY:
+               prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_NOW:
+               prop = POWER_SUPPLY_PROP_ENERGY_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_ENERGY_AVG:
+               prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_CAPACITY:
+               prop = POWER_SUPPLY_PROP_CAPACITY;
+               break;
+       case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
+               prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN;
+               break;
+       case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX:
+               prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+               prop = POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP:
+               prop = POWER_SUPPLY_PROP_TEMP;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_MAX:
+               prop = POWER_SUPPLY_PROP_TEMP_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_MIN:
+               prop = POWER_SUPPLY_PROP_TEMP_MIN;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+               prop = POWER_SUPPLY_PROP_TEMP_ALERT_MIN;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+               prop = POWER_SUPPLY_PROP_TEMP_ALERT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT:
+               prop = POWER_SUPPLY_PROP_TEMP_AMBIENT;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+               prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN;
+               break;
+       case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+               prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX;
+               break;
+       case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+               prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+               prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+               prop = POWER_SUPPLY_PROP_TIME_TO_FULL_NOW;
+               break;
+       case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+               prop = POWER_SUPPLY_PROP_TIME_TO_FULL_AVG;
+               break;
+       case GB_POWER_SUPPLY_PROP_TYPE:
+               prop = POWER_SUPPLY_PROP_TYPE;
+               break;
+       case GB_POWER_SUPPLY_PROP_SCOPE:
+               prop = POWER_SUPPLY_PROP_SCOPE;
+               break;
+       case GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               prop = POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT;
+               break;
+       case GB_POWER_SUPPLY_PROP_CALIBRATE:
+               prop = POWER_SUPPLY_PROP_CALIBRATE;
+               break;
+       default:
+               prop = -1;
+               break;
+       }
+
+       if (prop < 0)
+               return prop;
+
+       *psp = (enum power_supply_property)prop;
+
+       return 0;
+}
+
+static struct gb_connection *get_conn_from_psy(struct gb_power_supply *gbpsy)
+{
+       return gbpsy->supplies->connection;
+}
+
+static struct gb_power_supply_prop *get_psy_prop(struct gb_power_supply *gbpsy,
+                                                enum power_supply_property psp)
+{
+       int i;
+
+       for (i = 0; i < gbpsy->properties_count; i++)
+               if (gbpsy->props[i].prop == psp)
+                       return &gbpsy->props[i];
+       return NULL;
+}
+
+static int is_psy_prop_writeable(struct gb_power_supply *gbpsy,
+                                    enum power_supply_property psp)
+{
+       struct gb_power_supply_prop *prop;
+
+       prop = get_psy_prop(gbpsy, psp);
+       if (!prop)
+               return -ENOENT;
+       return prop->is_writeable ? 1 : 0;
+}
+
+static int is_prop_valint(enum power_supply_property psp)
+{
+       return ((psp < POWER_SUPPLY_PROP_MODEL_NAME) ? 1 : 0);
+}
+
+static void next_interval(struct gb_power_supply *gbpsy)
+{
+       if (gbpsy->update_interval == update_interval_max)
+               return;
+
+       /* do some exponential back-off in the update interval */
+       gbpsy->update_interval *= 2;
+       if (gbpsy->update_interval > update_interval_max)
+               gbpsy->update_interval = update_interval_max;
+}
+
+#ifndef CORE_OWNS_PSY_STRUCT
+static void __gb_power_supply_changed(struct gb_power_supply *gbpsy)
+{
+       power_supply_changed(&gbpsy->psy);
+}
+#else
+static void __gb_power_supply_changed(struct gb_power_supply *gbpsy)
+{
+       power_supply_changed(gbpsy->psy);
+}
+#endif
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+                                        struct gb_power_supply_prop *prop)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       int ret;
+
+       /*
+        * Check gbpsy->pm_acquired to make sure that only one pair of
+        * 'get_sync' and 'put_autosuspend' runtime PM calls is issued per
+        * state property change.
+        */
+       mutex_lock(&gbpsy->supply_lock);
+
+       if ((prop->val == GB_POWER_SUPPLY_STATUS_CHARGING) &&
+           !gbpsy->pm_acquired) {
+               ret = gb_pm_runtime_get_sync(connection->bundle);
+               if (ret)
+                       dev_err(&connection->bundle->dev,
+                               "Failed to set wake lock for charging state\n");
+               else
+                       gbpsy->pm_acquired = true;
+       } else {
+               if (gbpsy->pm_acquired) {
+                       ret = gb_pm_runtime_put_autosuspend(connection->bundle);
+                       if (ret)
+                               dev_err(&connection->bundle->dev,
+                                       "Failed to release wake lock for non-charging state\n");
+                       else
+                               gbpsy->pm_acquired = false;
+               }
+       }
+
+       mutex_unlock(&gbpsy->supply_lock);
+}
+
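+/*
+ * Compare the new and previous value of a property against the table of
+ * tracked changes (psy_props_changes): if the delta exceeds the configured
+ * tolerance (or the entry has no tolerance), run the optional per-property
+ * callback and mark the supply as changed.
+ */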
+static void check_changed(struct gb_power_supply *gbpsy,
+                         struct gb_power_supply_prop *prop)
+{
+       const struct gb_power_supply_changes *psyc;
+       int val = prop->val;
+       int prev_val = prop->previous_val;
+       bool changed = false;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(psy_props_changes); i++) {
+               psyc = &psy_props_changes[i];
+               if (prop->prop == psyc->prop) {
+                       if (!psyc->tolerance_change)
+                               changed = true;
+                       else if (val < prev_val &&
+                                prev_val - val > psyc->tolerance_change)
+                               changed = true;
+                       else if (val > prev_val &&
+                                val - prev_val > psyc->tolerance_change)
+                               changed = true;
+
+                       if (changed && psyc->prop_changed)
+                               psyc->prop_changed(gbpsy, prop);
+
+                       if (changed)
+                               gbpsy->changed = true;
+                       break;
+               }
+       }
+}
+
+static int total_props(struct gb_power_supply *gbpsy)
+{
+       /* this returns the number of intval plus strval properties */
+       return (gbpsy->properties_count + gbpsy->properties_count_str);
+}
+
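+/*
+ * Grow the raw property array by one entry and append the given property at
+ * the end; on allocation failure the previous array is left untouched.
+ */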
+static void prop_append(struct gb_power_supply *gbpsy,
+                       enum power_supply_property prop)
+{
+       enum power_supply_property *new_props_raw;
+
+       gbpsy->properties_count_str++;
+       new_props_raw = krealloc(gbpsy->props_raw, total_props(gbpsy) *
+                                sizeof(enum power_supply_property),
+                                GFP_KERNEL);
+       if (!new_props_raw)
+               return;
+       gbpsy->props_raw = new_props_raw;
+       gbpsy->props_raw[total_props(gbpsy) - 1] = prop;
+}
+
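+/*
+ * Pick a unique power-supply name: start from init_name (or the default
+ * "gb_power_supply" if it is empty) and append an increasing "_<n>" suffix
+ * until no already-registered supply with that name is found.
+ */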
+static int __gb_power_supply_set_name(char *init_name, char *name, size_t len)
+{
+       unsigned int i = 0;
+       int ret = 0;
+       struct power_supply *psy;
+
+       if (!strlen(init_name))
+               init_name = "gb_power_supply";
+       strlcpy(name, init_name, len);
+
+       while ((ret < len) && (psy = power_supply_get_by_name(name))) {
+#ifdef PSY_HAVE_PUT
+               power_supply_put(psy);
+#endif
+               ret = snprintf(name, len, "%s_%u", init_name, ++i);
+       }
+       if (ret >= len)
+               return -ENOMEM;
+       return i;
+}
+
+static void _gb_power_supply_append_props(struct gb_power_supply *gbpsy)
+{
+       if (strlen(gbpsy->manufacturer))
+               prop_append(gbpsy, POWER_SUPPLY_PROP_MANUFACTURER);
+       if (strlen(gbpsy->model_name))
+               prop_append(gbpsy, POWER_SUPPLY_PROP_MODEL_NAME);
+       if (strlen(gbpsy->serial_number))
+               prop_append(gbpsy, POWER_SUPPLY_PROP_SERIAL_NUMBER);
+}
+
+static int gb_power_supply_description_get(struct gb_power_supply *gbpsy)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       struct gb_power_supply_get_description_request req;
+       struct gb_power_supply_get_description_response resp;
+       int ret;
+
+       req.psy_id = gbpsy->id;
+
+       ret = gb_operation_sync(connection,
+                               GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION,
+                               &req, sizeof(req), &resp, sizeof(resp));
+       if (ret < 0)
+               return ret;
+
+       gbpsy->manufacturer = kstrndup(resp.manufacturer, PROP_MAX, GFP_KERNEL);
+       if (!gbpsy->manufacturer)
+               return -ENOMEM;
+       gbpsy->model_name = kstrndup(resp.model, PROP_MAX, GFP_KERNEL);
+       if (!gbpsy->model_name)
+               return -ENOMEM;
+       gbpsy->serial_number = kstrndup(resp.serial_number, PROP_MAX,
+                                      GFP_KERNEL);
+       if (!gbpsy->serial_number)
+               return -ENOMEM;
+
+       gbpsy->type = le16_to_cpu(resp.type);
+       gbpsy->properties_count = resp.properties_count;
+
+       return 0;
+}
+
+static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       struct gb_power_supply_get_property_descriptors_request *req;
+       struct gb_power_supply_get_property_descriptors_response *resp;
+       struct gb_operation *op;
+       u8 props_count = gbpsy->properties_count;
+       enum power_supply_property psp;
+       int ret;
+       int i, r = 0;
+
+       if (props_count == 0)
+               return 0;
+
+       op = gb_operation_create(connection,
+                                GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
+                                sizeof(*req), sizeof(*resp) + props_count *
+                                sizeof(struct gb_power_supply_props_desc),
+                                GFP_KERNEL);
+       if (!op)
+               return -ENOMEM;
+
+       req = op->request->payload;
+       req->psy_id = gbpsy->id;
+
+       ret = gb_operation_request_send_sync(op);
+       if (ret < 0)
+               goto out_put_operation;
+
+       resp = op->response->payload;
+
+       /* validate received properties */
+       for (i = 0; i < props_count; i++) {
+               ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+               if (ret < 0) {
+                       dev_warn(&connection->bundle->dev,
+                                "greybus property %u is not supported by this kernel, dropped\n",
+                                resp->props[i].property);
+                       gbpsy->properties_count--;
+               }
+       }
+
+       gbpsy->props = kcalloc(gbpsy->properties_count, sizeof(*gbpsy->props),
+                             GFP_KERNEL);
+       if (!gbpsy->props) {
+               ret = -ENOMEM;
+               goto out_put_operation;
+       }
+
+       gbpsy->props_raw = kcalloc(gbpsy->properties_count,
+                                  sizeof(*gbpsy->props_raw), GFP_KERNEL);
+       if (!gbpsy->props_raw) {
+               ret = -ENOMEM;
+               goto out_put_operation;
+       }
+
+       /* Store available properties, skip the ones we do not support */
+       for (i = 0; i < props_count; i++) {
+               ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+               if (ret < 0) {
+                       r++;
+                       continue;
+               }
+               gbpsy->props[i - r].prop = psp;
+               gbpsy->props[i - r].gb_prop = resp->props[i].property;
+               gbpsy->props_raw[i - r] = psp;
+               if (resp->props[i].is_writeable)
+                       gbpsy->props[i - r].is_writeable = true;
+       }
+
+       /*
+        * now append the properties for which we already got the information
+        * in the get_description operation (the char * ones).
+        */
+       _gb_power_supply_append_props(gbpsy);
+
+       ret = 0;
+out_put_operation:
+       gb_operation_put(op);
+
+       return ret;
+}
+
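+/*
+ * Fetch the current value of a single property from the module and, if it
+ * differs from the cached value, update the cache and run change detection.
+ */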
+static int __gb_power_supply_property_update(struct gb_power_supply *gbpsy,
+                                            enum power_supply_property psp)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       struct gb_power_supply_prop *prop;
+       struct gb_power_supply_get_property_request req;
+       struct gb_power_supply_get_property_response resp;
+       int val;
+       int ret;
+
+       prop = get_psy_prop(gbpsy, psp);
+       if (!prop)
+               return -EINVAL;
+       req.psy_id = gbpsy->id;
+       req.property = prop->gb_prop;
+
+       ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_GET_PROPERTY,
+                               &req, sizeof(req), &resp, sizeof(resp));
+       if (ret < 0)
+               return ret;
+
+       val = le32_to_cpu(resp.prop_val);
+       if (val == prop->val)
+               return 0;
+
+       prop->previous_val = prop->val;
+       prop->val = val;
+
+       check_changed(gbpsy, prop);
+
+       return 0;
+}
+
+static int __gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+                                         enum power_supply_property psp,
+                                         union power_supply_propval *val)
+{
+       struct gb_power_supply_prop *prop;
+
+       prop = get_psy_prop(gbpsy, psp);
+       if (!prop)
+               return -EINVAL;
+
+       val->intval = prop->val;
+       return 0;
+}
+
+static int __gb_power_supply_property_strval_get(struct gb_power_supply *gbpsy,
+                                               enum power_supply_property psp,
+                                               union power_supply_propval *val)
+{
+       switch (psp) {
+       case POWER_SUPPLY_PROP_MODEL_NAME:
+               val->strval = gbpsy->model_name;
+               break;
+       case POWER_SUPPLY_PROP_MANUFACTURER:
+               val->strval = gbpsy->manufacturer;
+               break;
+       case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+               val->strval = gbpsy->serial_number;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int _gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+                                        enum power_supply_property psp,
+                                        union power_supply_propval *val)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       int ret;
+
+       /*
+        * Properties of type const char * were already fetched by the
+        * get_description operation and are cached locally.
+        */
+       if (is_prop_valint(psp))
+               ret = __gb_power_supply_property_get(gbpsy, psp, val);
+       else
+               ret = __gb_power_supply_property_strval_get(gbpsy, psp, val);
+
+       if (ret < 0)
+               dev_err(&connection->bundle->dev,
+                       "failed to get property %u\n", psp);
+
+       return 0;
+}
+
+static int is_cache_valid(struct gb_power_supply *gbpsy)
+{
+       /* check whether the cache is still valid or has expired */
+       if (gbpsy->cache_invalid) {
+               gbpsy->cache_invalid = 0;
+               return 0;
+       }
+
+       if (gbpsy->last_update &&
+           time_is_after_jiffies(gbpsy->last_update +
+                                 msecs_to_jiffies(cache_time)))
+               return 1;
+
+       return 0;
+}
+
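+/*
+ * Refresh all integer properties from the module, guarded by the cache
+ * validity check and a runtime PM get/put around the transfers.
+ */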
+static int gb_power_supply_status_get(struct gb_power_supply *gbpsy)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       int ret = 0;
+       int i;
+
+       if (is_cache_valid(gbpsy))
+               return 0;
+
+       ret = gb_pm_runtime_get_sync(connection->bundle);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < gbpsy->properties_count; i++) {
+               ret = __gb_power_supply_property_update(gbpsy,
+                                                       gbpsy->props[i].prop);
+               if (ret < 0)
+                       break;
+       }
+
+       if (ret == 0)
+               gbpsy->last_update = jiffies;
+
+       gb_pm_runtime_put_autosuspend(connection->bundle);
+       return ret;
+}
+
+static void gb_power_supply_status_update(struct gb_power_supply *gbpsy)
+{
+       /* check if there is a change that needs to be reported */
+       gb_power_supply_status_get(gbpsy);
+
+       if (!gbpsy->changed)
+               return;
+
+       gbpsy->update_interval = update_interval_init;
+       __gb_power_supply_changed(gbpsy);
+       gbpsy->changed = false;
+}
+
+static void gb_power_supply_work(struct work_struct *work)
+{
+       struct gb_power_supply *gbpsy = container_of(work,
+                                                    struct gb_power_supply,
+                                                    work.work);
+
+       /*
+        * if the poll interval is not set, disable polling; this is
+        * especially helpful at unregister time.
+        */
+       if (!gbpsy->update_interval)
+               return;
+
+       gb_power_supply_status_update(gbpsy);
+       next_interval(gbpsy);
+       schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
+}
+
+static int get_property(struct power_supply *b,
+                       enum power_supply_property psp,
+                       union power_supply_propval *val)
+{
+       struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+       gb_power_supply_status_get(gbpsy);
+
+       return _gb_power_supply_property_get(gbpsy, psp, val);
+}
+
+static int gb_power_supply_property_set(struct gb_power_supply *gbpsy,
+                                       enum power_supply_property psp,
+                                       int val)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       struct gb_power_supply_prop *prop;
+       struct gb_power_supply_set_property_request req;
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(connection->bundle);
+       if (ret)
+               return ret;
+
+       prop = get_psy_prop(gbpsy, psp);
+       if (!prop) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       req.psy_id = gbpsy->id;
+       req.property = prop->gb_prop;
+       req.prop_val = cpu_to_le32((s32)val);
+
+       ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_SET_PROPERTY,
+                               &req, sizeof(req), NULL, 0);
+       if (ret < 0)
+               goto out;
+
+       /* immediately cache the new value */
+       prop->val = val;
+
+out:
+       gb_pm_runtime_put_autosuspend(connection->bundle);
+       return ret;
+}
+
+static int set_property(struct power_supply *b,
+                       enum power_supply_property psp,
+                       const union power_supply_propval *val)
+{
+       struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+       return gb_power_supply_property_set(gbpsy, psp, val->intval);
+}
+
+static int property_is_writeable(struct power_supply *b,
+                                enum power_supply_property psp)
+{
+       struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+       return is_psy_prop_writeable(gbpsy, psp);
+}
+
+#ifndef CORE_OWNS_PSY_STRUCT
+static int gb_power_supply_register(struct gb_power_supply *gbpsy)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+
+       gbpsy->psy.name                 = gbpsy->name;
+       gbpsy->psy.type                 = gbpsy->type;
+       gbpsy->psy.properties           = gbpsy->props_raw;
+       gbpsy->psy.num_properties       = total_props(gbpsy);
+       gbpsy->psy.get_property         = get_property;
+       gbpsy->psy.set_property         = set_property;
+       gbpsy->psy.property_is_writeable = property_is_writeable;
+
+       return power_supply_register(&connection->bundle->dev,
+                                    &gbpsy->psy);
+}
+#else
+static int gb_power_supply_register(struct gb_power_supply *gbpsy)
+{
+       struct gb_connection *connection = get_conn_from_psy(gbpsy);
+       struct power_supply_config cfg = {};
+
+       cfg.drv_data = gbpsy;
+
+       gbpsy->desc.name                = gbpsy->name;
+       gbpsy->desc.type                = gbpsy->type;
+       gbpsy->desc.properties          = gbpsy->props_raw;
+       gbpsy->desc.num_properties      = total_props(gbpsy);
+       gbpsy->desc.get_property        = get_property;
+       gbpsy->desc.set_property        = set_property;
+       gbpsy->desc.property_is_writeable = property_is_writeable;
+
+       gbpsy->psy = power_supply_register(&connection->bundle->dev,
+                                          &gbpsy->desc, &cfg);
+       return PTR_ERR_OR_ZERO(gbpsy->psy);
+}
+#endif
+
+static void _gb_power_supply_free(struct gb_power_supply *gbpsy)
+{
+       kfree(gbpsy->serial_number);
+       kfree(gbpsy->model_name);
+       kfree(gbpsy->manufacturer);
+       kfree(gbpsy->props_raw);
+       kfree(gbpsy->props);
+}
+
+static void _gb_power_supply_release(struct gb_power_supply *gbpsy)
+{
+       gbpsy->update_interval = 0;
+
+       cancel_delayed_work_sync(&gbpsy->work);
+#ifndef CORE_OWNS_PSY_STRUCT
+       if (gbpsy->registered)
+               power_supply_unregister(&gbpsy->psy);
+#else
+       if (gbpsy->registered)
+               power_supply_unregister(gbpsy->psy);
+#endif
+
+       _gb_power_supply_free(gbpsy);
+}
+
+static void _gb_power_supplies_release(struct gb_power_supplies *supplies)
+{
+       int i;
+
+       if (!supplies->supply)
+               return;
+
+       mutex_lock(&supplies->supplies_lock);
+       for (i = 0; i < supplies->supplies_count; i++)
+               _gb_power_supply_release(&supplies->supply[i]);
+       kfree(supplies->supply);
+       mutex_unlock(&supplies->supplies_lock);
+       kfree(supplies);
+}
+
+static int gb_power_supplies_get_count(struct gb_power_supplies *supplies)
+{
+       struct gb_power_supply_get_supplies_response resp;
+       int ret;
+
+       ret = gb_operation_sync(supplies->connection,
+                               GB_POWER_SUPPLY_TYPE_GET_SUPPLIES,
+                               NULL, 0, &resp, sizeof(resp));
+       if (ret < 0)
+               return ret;
+
+       if (!resp.supplies_count)
+               return -EINVAL;
+
+       supplies->supplies_count = resp.supplies_count;
+
+       return ret;
+}
+
+static int gb_power_supply_config(struct gb_power_supplies *supplies, int id)
+{
+       struct gb_power_supply *gbpsy = &supplies->supply[id];
+       int ret;
+
+       gbpsy->supplies = supplies;
+       gbpsy->id = id;
+
+       ret = gb_power_supply_description_get(gbpsy);
+       if (ret < 0)
+               return ret;
+
+       return gb_power_supply_prop_descriptors_get(gbpsy);
+}
+
+static int gb_power_supply_enable(struct gb_power_supply *gbpsy)
+{
+       int ret;
+
+       /* guarantee that we have a unique name before registering */
+       ret =  __gb_power_supply_set_name(gbpsy->model_name, gbpsy->name,
+                                         sizeof(gbpsy->name));
+       if (ret < 0)
+               return ret;
+
+       mutex_init(&gbpsy->supply_lock);
+
+       ret = gb_power_supply_register(gbpsy);
+       if (ret < 0)
+               return ret;
+
+       gbpsy->update_interval = update_interval_init;
+       INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
+       schedule_delayed_work(&gbpsy->work, 0);
+
+       /* everything went fine; mark it so the release code knows */
+       gbpsy->registered = true;
+
+       return 0;
+}
+
+static int gb_power_supplies_setup(struct gb_power_supplies *supplies)
+{
+       struct gb_connection *connection = supplies->connection;
+       int ret;
+       int i;
+
+       mutex_lock(&supplies->supplies_lock);
+
+       ret = gb_power_supplies_get_count(supplies);
+       if (ret < 0)
+               goto out;
+
+       supplies->supply = kzalloc(supplies->supplies_count *
+                                    sizeof(struct gb_power_supply),
+                                    GFP_KERNEL);
+
+       if (!supplies->supply) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < supplies->supplies_count; i++) {
+               ret = gb_power_supply_config(supplies, i);
+               if (ret < 0) {
+                       dev_err(&connection->bundle->dev,
+                               "Failed to configure power supply devices\n");
+                       goto out;
+               }
+       }
+out:
+       mutex_unlock(&supplies->supplies_lock);
+       return ret;
+}
+
+static int gb_power_supplies_register(struct gb_power_supplies *supplies)
+{
+       struct gb_connection *connection = supplies->connection;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&supplies->supplies_lock);
+
+       for (i = 0; i < supplies->supplies_count; i++) {
+               ret = gb_power_supply_enable(&supplies->supply[i]);
+               if (ret < 0) {
+                       dev_err(&connection->bundle->dev,
+                               "Failed to enable power supply devices\n");
+                       break;
+               }
+       }
+
+       mutex_unlock(&supplies->supplies_lock);
+       return ret;
+}
+
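+/*
+ * Handler for unsolicited requests from the module: only the
+ * GB_POWER_SUPPLY_TYPE_EVENT operation is accepted, and an update event
+ * invalidates the cache and triggers an immediate status refresh.
+ */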
+static int gb_supplies_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_power_supplies *supplies = gb_connection_get_data(connection);
+       struct gb_power_supply *gbpsy;
+       struct gb_message *request;
+       struct gb_power_supply_event_request *payload;
+       u8 psy_id;
+       u8 event;
+       int ret = 0;
+
+       if (op->type != GB_POWER_SUPPLY_TYPE_EVENT) {
+               dev_err(&connection->bundle->dev,
+                       "Unsupported unsolicited event: %u\n", op->type);
+               return -EINVAL;
+       }
+
+       request = op->request;
+
+       if (request->payload_size < sizeof(*payload)) {
+               dev_err(&connection->bundle->dev,
+                       "Wrong event size received (%zu < %zu)\n",
+                       request->payload_size, sizeof(*payload));
+               return -EINVAL;
+       }
+
+       payload = request->payload;
+       psy_id = payload->psy_id;
+       mutex_lock(&supplies->supplies_lock);
+       if (psy_id >= supplies->supplies_count ||
+           !supplies->supply[psy_id].registered) {
+               dev_err(&connection->bundle->dev,
+                       "Event received for unconfigured power_supply id: %d\n",
+                       psy_id);
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       event = payload->event;
+       /*
+        * we only handle events after setup is done and before release is
+        * running; for that, just check update_interval.
+        */
+       gbpsy = &supplies->supply[psy_id];
+       if (!gbpsy->update_interval) {
+               ret = -ESHUTDOWN;
+               goto out_unlock;
+       }
+
+       if (event & GB_POWER_SUPPLY_UPDATE) {
+               /*
+                * we need to make sure we invalidate the cache; otherwise no
+                * new property values will be fetched and the whole purpose of
+                * this event is missed.
+                */
+               gbpsy->cache_invalid = 1;
+               gb_power_supply_status_update(gbpsy);
+       }
+
+out_unlock:
+       mutex_unlock(&supplies->supplies_lock);
+       return ret;
+}
+
+static int gb_power_supply_probe(struct gb_bundle *bundle,
+                                const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_power_supplies *supplies;
+       int ret;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_POWER_SUPPLY)
+               return -ENODEV;
+
+       supplies = kzalloc(sizeof(*supplies), GFP_KERNEL);
+       if (!supplies)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                         gb_supplies_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto out;
+       }
+
+       supplies->connection = connection;
+       gb_connection_set_data(connection, supplies);
+
+       mutex_init(&supplies->supplies_lock);
+
+       greybus_set_drvdata(bundle, supplies);
+
+       /* We aren't ready to receive an incoming request yet */
+       ret = gb_connection_enable_tx(connection);
+       if (ret)
+               goto error_connection_destroy;
+
+       ret = gb_power_supplies_setup(supplies);
+       if (ret < 0)
+               goto error_connection_disable;
+
+       /* We are ready to receive an incoming request now, enable RX as well */
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto error_connection_disable;
+
+       ret = gb_power_supplies_register(supplies);
+       if (ret < 0)
+               goto error_connection_disable;
+
+       gb_pm_runtime_put_autosuspend(bundle);
+       return 0;
+
+error_connection_disable:
+       gb_connection_disable(connection);
+error_connection_destroy:
+       gb_connection_destroy(connection);
+out:
+       _gb_power_supplies_release(supplies);
+       return ret;
+}
+
+static void gb_power_supply_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_power_supplies *supplies = greybus_get_drvdata(bundle);
+
+       gb_connection_disable(supplies->connection);
+       gb_connection_destroy(supplies->connection);
+
+       _gb_power_supplies_release(supplies);
+}
+
+static const struct greybus_bundle_id gb_power_supply_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_POWER_SUPPLY) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_power_supply_id_table);
+
+static struct greybus_driver gb_power_supply_driver = {
+       .name           = "power_supply",
+       .probe          = gb_power_supply_probe,
+       .disconnect     = gb_power_supply_disconnect,
+       .id_table       = gb_power_supply_id_table,
+};
+module_greybus_driver(gb_power_supply_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
new file mode 100644 (file)
index 0000000..c4bf329
--- /dev/null
@@ -0,0 +1,338 @@
+/*
+ * PWM Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pwm.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_pwm_chip {
+       struct gb_connection    *connection;
+       u8                      pwm_max;        /* max pwm number */
+
+       struct pwm_chip         chip;
+       struct pwm_chip         *pwm;
+};
+#define pwm_chip_to_gb_pwm_chip(chip) \
+       container_of(chip, struct gb_pwm_chip, chip)
+
+
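+/*
+ * Query how many PWM instances the module provides; the driver treats the
+ * returned count as the largest valid PWM number, so it exposes
+ * pwm_max + 1 channels to the PWM core.
+ */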
+static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
+{
+       struct gb_pwm_count_response response;
+       int ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
+                               NULL, 0, &response, sizeof(response));
+       if (ret)
+               return ret;
+       pwmc->pwm_max = response.count;
+       return 0;
+}
+
+static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
+                                    u8 which)
+{
+       struct gb_pwm_activate_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ACTIVATE,
+                               &request, sizeof(request), NULL, 0);
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
+                                      u8 which)
+{
+       struct gb_pwm_deactivate_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DEACTIVATE,
+                               &request, sizeof(request), NULL, 0);
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
+                                  u8 which, u32 duty, u32 period)
+{
+       struct gb_pwm_config_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+       request.duty = cpu_to_le32(duty);
+       request.period = cpu_to_le32(period);
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_CONFIG,
+                               &request, sizeof(request), NULL, 0);
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
+                                        u8 which, u8 polarity)
+{
+       struct gb_pwm_polarity_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+       request.polarity = polarity;
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_POLARITY,
+                               &request, sizeof(request), NULL, 0);
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
+                                  u8 which)
+{
+       struct gb_pwm_enable_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ENABLE,
+                               &request, sizeof(request), NULL, 0);
+       if (ret)
+               gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
+                                   u8 which)
+{
+       struct gb_pwm_disable_request request;
+       struct gbphy_device *gbphy_dev;
+       int ret;
+
+       if (which > pwmc->pwm_max)
+               return -EINVAL;
+
+       request.which = which;
+
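+       /*
+        * The runtime PM reference taken in gb_pwm_enable_operation() is
+        * dropped below, after the disable request completes.
+        */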
+       ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
+                               &request, sizeof(request), NULL, 0);
+
+       gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+}
+
+static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
+};
+
+static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       if (pwm_is_enabled(pwm))
+               dev_warn(chip->dev, "freeing PWM device without disabling\n");
+
+       gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
+}
+
+static int gb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                        int duty_ns, int period_ns)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       return gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_ns, period_ns);
+};
+
+static int gb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+                              enum pwm_polarity polarity)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       return gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, polarity);
+};
+
+static int gb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
+};
+
+static void gb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+       gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+};
+
+static const struct pwm_ops gb_pwm_ops = {
+       .request = gb_pwm_request,
+       .free = gb_pwm_free,
+       .config = gb_pwm_config,
+       .set_polarity = gb_pwm_set_polarity,
+       .enable = gb_pwm_enable,
+       .disable = gb_pwm_disable,
+       .owner = THIS_MODULE,
+};
+
+static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
+                       const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       struct gb_pwm_chip *pwmc;
+       struct pwm_chip *pwm;
+       int ret;
+
+       pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
+       if (!pwmc)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         NULL);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto exit_pwmc_free;
+       }
+
+       pwmc->connection = connection;
+       gb_connection_set_data(connection, pwmc);
+       gb_gbphy_set_data(gbphy_dev, pwmc);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto exit_connection_destroy;
+
+       /* Query number of PWMs present */
+       ret = gb_pwm_count_operation(pwmc);
+       if (ret)
+               goto exit_connection_disable;
+
+       pwm = &pwmc->chip;
+
+       pwm->dev = &gbphy_dev->dev;
+       pwm->ops = &gb_pwm_ops;
+       pwm->base = -1;                 /* Allocate base dynamically */
+       pwm->npwm = pwmc->pwm_max + 1;
+       pwm->can_sleep = true;          /* FIXME */
+
+       ret = pwmchip_add(pwm);
+       if (ret) {
+               dev_err(&gbphy_dev->dev,
+                       "failed to register PWM: %d\n", ret);
+               goto exit_connection_disable;
+       }
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+       return 0;
+
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_pwmc_free:
+       kfree(pwmc);
+       return ret;
+}
+
+static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = pwmc->connection;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       pwmchip_remove(&pwmc->chip);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+       kfree(pwmc);
+}
+
+static const struct gbphy_device_id gb_pwm_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_PWM) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_pwm_id_table);
+
+static struct gbphy_driver pwm_driver = {
+       .name           = "pwm",
+       .probe          = gb_pwm_probe,
+       .remove         = gb_pwm_remove,
+       .id_table       = gb_pwm_id_table,
+};
+
+module_gbphy_driver(pwm_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
new file mode 100644 (file)
index 0000000..729d258
--- /dev/null
@@ -0,0 +1,381 @@
+/*
+ * Greybus driver for the Raw protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/uaccess.h>
+
+#include "greybus.h"
+
+struct gb_raw {
+       struct gb_connection *connection;
+
+       struct list_head list;
+       int list_data;
+       struct mutex list_lock;
+       dev_t dev;
+       struct cdev cdev;
+       struct device *device;
+};
+
+struct raw_data {
+       struct list_head entry;
+       u32 len;
+       u8 data[0];
+};
+
+static struct class *raw_class;
+static int raw_major;
+static const struct file_operations raw_fops;
+static DEFINE_IDA(minors);
+
+/* Number of minor devices this driver supports */
+#define NUM_MINORS     256
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE        (PAGE_SIZE * 2)
+
+/*
+ * Maximum size of the data in the receive buffer we allow before we start to
+ * drop messages on the floor
+ */
+#define MAX_DATA_SIZE  (MAX_PACKET_SIZE * 8)
+
+/*
+ * Add the raw data message to the list of received messages.
+ */
+static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
+{
+       struct raw_data *raw_data;
+       struct device *dev = &raw->connection->bundle->dev;
+       int retval = 0;
+
+       if (len > MAX_PACKET_SIZE) {
+               dev_err(dev, "Data packet too big, rejected\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&raw->list_lock);
+       if ((raw->list_data + len) > MAX_DATA_SIZE) {
+               dev_err(dev, "Too much data in receive buffer, now dropping packets\n");
+               retval = -EINVAL;
+               goto exit;
+       }
+
+       raw_data = kmalloc(sizeof(*raw_data) + len, GFP_KERNEL);
+       if (!raw_data) {
+               retval = -ENOMEM;
+               goto exit;
+       }
+
+       raw->list_data += len;
+       raw_data->len = len;
+       memcpy(&raw_data->data[0], data, len);
+
+       list_add_tail(&raw_data->entry, &raw->list);
+exit:
+       mutex_unlock(&raw->list_lock);
+       return retval;
+}
+
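+/*
+ * Handler for incoming GB_RAW_TYPE_SEND requests from the module: validate
+ * the declared length against the payload size and queue the data for read().
+ */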
+static int gb_raw_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct device *dev = &connection->bundle->dev;
+       struct gb_raw *raw = greybus_get_drvdata(connection->bundle);
+       struct gb_raw_send_request *receive;
+       u32 len;
+
+       if (op->type != GB_RAW_TYPE_SEND) {
+               dev_err(dev, "unknown request type 0x%02x\n", op->type);
+               return -EINVAL;
+       }
+
+       /* Verify size of payload */
+       if (op->request->payload_size < sizeof(*receive)) {
+               dev_err(dev, "raw receive request too small (%zu < %zu)\n",
+                       op->request->payload_size, sizeof(*receive));
+               return -EINVAL;
+       }
+       receive = op->request->payload;
+       len = le32_to_cpu(receive->len);
+       if (len != (int)(op->request->payload_size - sizeof(__le32))) {
+               dev_err(dev, "raw receive request wrong size %d vs %d\n", len,
+                       (int)(op->request->payload_size - sizeof(__le32)));
+               return -EINVAL;
+       }
+       if (len == 0) {
+               dev_err(dev, "raw receive request of 0 bytes?\n");
+               return -EINVAL;
+       }
+
+       return receive_data(raw, len, receive->data);
+}
+
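+/*
+ * Copy a user-space buffer into a freshly allocated send request and ship it
+ * to the module as a synchronous GB_RAW_TYPE_SEND operation.
+ */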
+static int gb_raw_send(struct gb_raw *raw, u32 len, const char __user *data)
+{
+       struct gb_connection *connection = raw->connection;
+       struct gb_raw_send_request *request;
+       int retval;
+
+       request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       if (copy_from_user(&request->data[0], data, len)) {
+               kfree(request);
+               return -EFAULT;
+       }
+
+       request->len = cpu_to_le32(len);
+
+       retval = gb_operation_sync(connection, GB_RAW_TYPE_SEND,
+                                  request, len + sizeof(*request),
+                                  NULL, 0);
+
+       kfree(request);
+       return retval;
+}
+
+static int gb_raw_probe(struct gb_bundle *bundle,
+                       const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_raw *raw;
+       int retval;
+       int minor;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_RAW)
+               return -ENODEV;
+
+       raw = kzalloc(sizeof(*raw), GFP_KERNEL);
+       if (!raw)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                         gb_raw_request_handler);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto error_free;
+       }
+
+       INIT_LIST_HEAD(&raw->list);
+       mutex_init(&raw->list_lock);
+
+       raw->connection = connection;
+       greybus_set_drvdata(bundle, raw);
+
+       minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+       if (minor < 0) {
+               retval = minor;
+               goto error_connection_destroy;
+       }
+
+       raw->dev = MKDEV(raw_major, minor);
+       cdev_init(&raw->cdev, &raw_fops);
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto error_remove_ida;
+
+       retval = cdev_add(&raw->cdev, raw->dev, 1);
+       if (retval)
+               goto error_connection_disable;
+
+       raw->device = device_create(raw_class, &connection->bundle->dev,
+                                   raw->dev, raw, "gb!raw%d", minor);
+       if (IS_ERR(raw->device)) {
+               retval = PTR_ERR(raw->device);
+               goto error_del_cdev;
+       }
+
+       return 0;
+
+error_del_cdev:
+       cdev_del(&raw->cdev);
+
+error_connection_disable:
+       gb_connection_disable(connection);
+
+error_remove_ida:
+       ida_simple_remove(&minors, minor);
+
+error_connection_destroy:
+       gb_connection_destroy(connection);
+
+error_free:
+       kfree(raw);
+       return retval;
+}
+
+static void gb_raw_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_raw *raw = greybus_get_drvdata(bundle);
+       struct gb_connection *connection = raw->connection;
+       struct raw_data *raw_data;
+       struct raw_data *temp;
+
+       // FIXME - handle removing a connection when the char device node is open.
+       device_destroy(raw_class, raw->dev);
+       cdev_del(&raw->cdev);
+       gb_connection_disable(connection);
+       ida_simple_remove(&minors, MINOR(raw->dev));
+       gb_connection_destroy(connection);
+
+       mutex_lock(&raw->list_lock);
+       list_for_each_entry_safe(raw_data, temp, &raw->list, entry) {
+               list_del(&raw_data->entry);
+               kfree(raw_data);
+       }
+       mutex_unlock(&raw->list_lock);
+
+       kfree(raw);
+}
+
+/*
+ * Character device node interfaces.
+ *
+ * Note, we are using read/write to only allow a single read/write per message.
+ * This means for read(), you have to provide a big enough buffer for the full
+ * message to be copied into.  If the buffer isn't big enough, the read() will
+ * fail with -ENOSPC.
+ */
+
+static int raw_open(struct inode *inode, struct file *file)
+{
+       struct cdev *cdev = inode->i_cdev;
+       struct gb_raw *raw = container_of(cdev, struct gb_raw, cdev);
+
+       file->private_data = raw;
+       return 0;
+}
+
+static ssize_t raw_write(struct file *file, const char __user *buf,
+                        size_t count, loff_t *ppos)
+{
+       struct gb_raw *raw = file->private_data;
+       int retval;
+
+       if (!count)
+               return 0;
+
+       if (count > MAX_PACKET_SIZE)
+               return -E2BIG;
+
+       retval = gb_raw_send(raw, count, buf);
+       if (retval)
+               return retval;
+
+       return count;
+}
+
+static ssize_t raw_read(struct file *file, char __user *buf, size_t count,
+                       loff_t *ppos)
+{
+       struct gb_raw *raw = file->private_data;
+       int retval = 0;
+       struct raw_data *raw_data;
+
+       mutex_lock(&raw->list_lock);
+       if (list_empty(&raw->list))
+               goto exit;
+
+       raw_data = list_first_entry(&raw->list, struct raw_data, entry);
+       if (raw_data->len > count) {
+               retval = -ENOSPC;
+               goto exit;
+       }
+
+       if (copy_to_user(buf, &raw_data->data[0], raw_data->len)) {
+               retval = -EFAULT;
+               goto exit;
+       }
+
+       list_del(&raw_data->entry);
+       raw->list_data -= raw_data->len;
+       retval = raw_data->len;
+       kfree(raw_data);
+
+exit:
+       mutex_unlock(&raw->list_lock);
+       return retval;
+}
+
+static const struct file_operations raw_fops = {
+       .owner          = THIS_MODULE,
+       .write          = raw_write,
+       .read           = raw_read,
+       .open           = raw_open,
+       .llseek         = noop_llseek,
+};
+
+static const struct greybus_bundle_id gb_raw_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_RAW) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_raw_id_table);
+
+static struct greybus_driver gb_raw_driver = {
+       .name           = "raw",
+       .probe          = gb_raw_probe,
+       .disconnect     = gb_raw_disconnect,
+       .id_table       = gb_raw_id_table,
+};
+
+static int raw_init(void)
+{
+       dev_t dev;
+       int retval;
+
+       raw_class = class_create(THIS_MODULE, "gb_raw");
+       if (IS_ERR(raw_class)) {
+               retval = PTR_ERR(raw_class);
+               goto error_class;
+       }
+
+       retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
+       if (retval < 0)
+               goto error_chrdev;
+
+       raw_major = MAJOR(dev);
+
+       retval = greybus_register(&gb_raw_driver);
+       if (retval)
+               goto error_gb;
+
+       return 0;
+
+error_gb:
+       unregister_chrdev_region(dev, NUM_MINORS);
+error_chrdev:
+       class_destroy(raw_class);
+error_class:
+       return retval;
+}
+module_init(raw_init);
+
+static void __exit raw_exit(void)
+{
+       greybus_deregister(&gb_raw_driver);
+       unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
+       class_destroy(raw_class);
+       ida_destroy(&minors);
+}
+module_exit(raw_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/scripts/checkpatch.pl b/drivers/staging/greybus/scripts/checkpatch.pl
new file mode 100755 (executable)
index 0000000..9930b73
--- /dev/null
@@ -0,0 +1,5993 @@
+#!/usr/bin/perl -w
+# (c) 2001, Dave Jones. (the file handling bit)
+# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
+# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
+# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+use strict;
+use POSIX;
+use File::Basename;
+use Cwd 'abs_path';
+use Term::ANSIColor qw(:constants);
+
+my $P = $0;
+my $D = dirname(abs_path($P));
+
+my $V = '0.32';
+
+use Getopt::Long qw(:config no_auto_abbrev);
+
+my $quiet = 0;
+my $tree = 0;
+my $chk_signoff = 1;
+my $chk_patch = 1;
+my $tst_only;
+my $emacs = 0;
+my $terse = 0;
+my $showfile = 0;
+my $file = 0;
+my $check = 0;
+my $check_orig = 0;
+my $summary = 1;
+my $mailback = 0;
+my $summary_file = 0;
+my $show_types = 0;
+my $fix = 0;
+my $fix_inplace = 0;
+my $root;
+my %debug;
+my %camelcase = ();
+my %use_type = ();
+my @use = ();
+my %ignore_type = ();
+my @ignore = ();
+my $help = 0;
+my $configuration_file = ".checkpatch.conf";
+my $max_line_length = 80;
+my $ignore_perl_version = 0;
+my $minimum_perl_version = 5.10.0;
+my $min_conf_desc_length = 4;
+my $spelling_file = "$D/spelling.txt";
+my $codespell = 0;
+my $codespellfile = "/usr/share/codespell/dictionary.txt";
+my $color = 1;
+
+sub help {
+       my ($exitcode) = @_;
+
+       print << "EOM";
+Usage: $P [OPTION]... [FILE]...
+Version: $V
+
+Options:
+  -q, --quiet                quiet
+  --no-tree                  run without a kernel tree
+  --no-signoff               do not check for 'Signed-off-by' line
+  --patch                    treat FILE as patchfile (default)
+  --emacs                    emacs compile window format
+  --terse                    one line per report
+  --showfile                 emit diffed file position, not input file position
+  -f, --file                 treat FILE as regular source file
+  --subjective, --strict     enable more subjective tests
+  --types TYPE(,TYPE2...)    show only these comma separated message types
+  --ignore TYPE(,TYPE2...)   ignore various comma separated message types
+  --max-line-length=n        set the maximum line length, if exceeded, warn
+  --min-conf-desc-length=n   set the min description length, if shorter, warn
+  --show-types               show the message "types" in the output
+  --root=PATH                PATH to the kernel tree root
+  --no-summary               suppress the per-file summary
+  --mailback                 only produce a report in case of warnings/errors
+  --summary-file             include the filename in summary
+  --debug KEY=[0|1]          turn on/off debugging of KEY, where KEY is one of
+                             'values', 'possible', 'type', and 'attr' (default
+                             is all off)
+  --test-only=WORD           report only warnings/errors containing WORD
+                             literally
+  --fix                      EXPERIMENTAL - may create horrible results
+                             If correctable single-line errors exist, create
+                             "<inputfile>.EXPERIMENTAL-checkpatch-fixes"
+                             with potential errors corrected to the preferred
+                             checkpatch style
+  --fix-inplace              EXPERIMENTAL - may create horrible results
+                             Is the same as --fix, but overwrites the input
+                             file.  It's your fault if there's no backup or git
+  --ignore-perl-version      override checking of perl version.  expect
+                             runtime errors.
+  --codespell                Use the codespell dictionary for spelling/typos
+                             (default:/usr/share/codespell/dictionary.txt)
+  --codespellfile            Use this codespell dictionary
+  --color                    Use colors when output is STDOUT (default: on)
+  -h, --help, --version      display this help and exit
+
+When FILE is - read standard input.
+EOM
+
+       exit($exitcode);
+}
+
+my $conf = which_conf($configuration_file);
+if (-f $conf) {
+       my @conf_args;
+       open(my $conffile, '<', "$conf")
+           or warn "$P: Can't find a readable $configuration_file file $!\n";
+
+       while (<$conffile>) {
+               my $line = $_;
+
+               $line =~ s/\s*\n?$//g;
+               $line =~ s/^\s*//g;
+               $line =~ s/\s+/ /g;
+
+               next if ($line =~ m/^\s*#/);
+               next if ($line =~ m/^\s*$/);
+
+               my @words = split(" ", $line);
+               foreach my $word (@words) {
+                       last if ($word =~ m/^#/);
+                       push (@conf_args, $word);
+               }
+       }
+       close($conffile);
+       unshift(@ARGV, @conf_args) if @conf_args;
+}
+
+GetOptions(
+       'q|quiet+'      => \$quiet,
+       'tree!'         => \$tree,
+       'signoff!'      => \$chk_signoff,
+       'patch!'        => \$chk_patch,
+       'emacs!'        => \$emacs,
+       'terse!'        => \$terse,
+       'showfile!'     => \$showfile,
+       'f|file!'       => \$file,
+       'subjective!'   => \$check,
+       'strict!'       => \$check,
+       'ignore=s'      => \@ignore,
+       'types=s'       => \@use,
+       'show-types!'   => \$show_types,
+       'max-line-length=i' => \$max_line_length,
+       'min-conf-desc-length=i' => \$min_conf_desc_length,
+       'root=s'        => \$root,
+       'summary!'      => \$summary,
+       'mailback!'     => \$mailback,
+       'summary-file!' => \$summary_file,
+       'fix!'          => \$fix,
+       'fix-inplace!'  => \$fix_inplace,
+       'ignore-perl-version!' => \$ignore_perl_version,
+       'debug=s'       => \%debug,
+       'test-only=s'   => \$tst_only,
+       'codespell!'    => \$codespell,
+       'codespellfile=s'       => \$codespellfile,
+       'color!'        => \$color,
+       'h|help'        => \$help,
+       'version'       => \$help
+) or help(1);
+
+help(0) if ($help);
+
+$fix = 1 if ($fix_inplace);
+$check_orig = $check;
+
+my $exit = 0;
+
+if ($^V && $^V lt $minimum_perl_version) {
+       printf "$P: requires at least perl version %vd\n", $minimum_perl_version;
+       if (!$ignore_perl_version) {
+               exit(1);
+       }
+}
+
+if ($#ARGV < 0) {
+       print "$P: no input files\n";
+       exit(1);
+}
+
+sub hash_save_array_words {
+       my ($hashRef, $arrayRef) = @_;
+
+       my @array = split(/,/, join(',', @$arrayRef));
+       foreach my $word (@array) {
+               $word =~ s/\s*\n?$//g;
+               $word =~ s/^\s*//g;
+               $word =~ s/\s+/ /g;
+               $word =~ tr/[a-z]/[A-Z]/;
+
+               next if ($word =~ m/^\s*#/);
+               next if ($word =~ m/^\s*$/);
+
+               $hashRef->{$word}++;
+       }
+}
+
+sub hash_show_words {
+       my ($hashRef, $prefix) = @_;
+
+       if (keys %$hashRef) {
+               print "\nNOTE: $prefix message types:";
+               foreach my $word (sort keys %$hashRef) {
+                       print " $word";
+               }
+               print "\n";
+       }
+}
+
+hash_save_array_words(\%ignore_type, \@ignore);
+hash_save_array_words(\%use_type, \@use);
+
+my $dbg_values = 0;
+my $dbg_possible = 0;
+my $dbg_type = 0;
+my $dbg_attr = 0;
+for my $key (keys %debug) {
+       ## no critic
+       eval "\${dbg_$key} = '$debug{$key}';";
+       die "$@" if ($@);
+}
+
+my $rpt_cleaners = 0;
+
+if ($terse) {
+       $emacs = 1;
+       $quiet++;
+}
+
+if ($tree) {
+       if (defined $root) {
+               if (!top_of_kernel_tree($root)) {
+                       die "$P: $root: --root does not point at a valid tree\n";
+               }
+       } else {
+               if (top_of_kernel_tree('.')) {
+                       $root = '.';
+               } elsif ($0 =~ m@(.*)/scripts/[^/]*$@ &&
+                                               top_of_kernel_tree($1)) {
+                       $root = $1;
+               }
+       }
+
+       if (!defined $root) {
+               print "Must be run from the top-level dir. of a kernel tree\n";
+               exit(2);
+       }
+}
+
+my $emitted_corrupt = 0;
+
+our $Ident     = qr{
+                       [A-Za-z_][A-Za-z\d_]*
+                       (?:\s*\#\#\s*[A-Za-z_][A-Za-z\d_]*)*
+               }x;
+our $Storage   = qr{extern|static|asmlinkage};
+our $Sparse    = qr{
+                       __user|
+                       __kernel|
+                       __force|
+                       __iomem|
+                       __pmem|
+                       __must_check|
+                       __init_refok|
+                       __kprobes|
+                       __ref|
+                       __rcu|
+                       __private
+               }x;
+our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
+our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
+our $InitAttributeConst = qr{$InitAttributePrefix(?:initconst\b)};
+our $InitAttributeInit = qr{$InitAttributePrefix(?:init\b)};
+our $InitAttribute = qr{$InitAttributeData|$InitAttributeConst|$InitAttributeInit};
+
+# Notes to $Attribute:
+# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
+our $Attribute = qr{
+                       const|
+                       __percpu|
+                       __nocast|
+                       __safe|
+                       __bitwise__|
+                       __packed__|
+                       __packed2__|
+                       __naked|
+                       __maybe_unused|
+                       __always_unused|
+                       __noreturn|
+                       __used|
+                       __cold|
+                       __pure|
+                       __noclone|
+                       __deprecated|
+                       __read_mostly|
+                       __kprobes|
+                       $InitAttribute|
+                       ____cacheline_aligned|
+                       ____cacheline_aligned_in_smp|
+                       ____cacheline_internodealigned_in_smp|
+                       __weak
+                 }x;
+our $Modifier;
+our $Inline    = qr{inline|__always_inline|noinline|__inline|__inline__};
+our $Member    = qr{->$Ident|\.$Ident|\[[^]]*\]};
+our $Lval      = qr{$Ident(?:$Member)*};
+
+our $Int_type  = qr{(?i)llu|ull|ll|lu|ul|l|u};
+our $Binary    = qr{(?i)0b[01]+$Int_type?};
+our $Hex       = qr{(?i)0x[0-9a-f]+$Int_type?};
+our $Int       = qr{[0-9]+$Int_type?};
+our $Octal     = qr{0[0-7]+$Int_type?};
+our $String    = qr{"[X\t]*"};
+our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
+our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
+our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
+our $Float     = qr{$Float_hex|$Float_dec|$Float_int};
+our $Constant  = qr{$Float|$Binary|$Octal|$Hex|$Int};
+our $Assignment        = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
+our $Compare    = qr{<=|>=|==|!=|<|(?<!-)>};
+our $Arithmetic = qr{\+|-|\*|\/|%};
+our $Operators = qr{
+                       <=|>=|==|!=|
+                       =>|->|<<|>>|<|>|!|~|
+                       &&|\|\||,|\^|\+\+|--|&|\||$Arithmetic
+                 }x;
+
+our $c90_Keywords = qr{do|for|while|if|else|return|goto|continue|switch|default|case|break}x;
+
+our $BasicType;
+our $NonptrType;
+our $NonptrTypeMisordered;
+our $NonptrTypeWithAttr;
+our $Type;
+our $TypeMisordered;
+our $Declare;
+our $DeclareMisordered;
+
+our $NON_ASCII_UTF8    = qr{
+       [\xC2-\xDF][\x80-\xBF]               # non-overlong 2-byte
+       |  \xE0[\xA0-\xBF][\x80-\xBF]        # excluding overlongs
+       | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2}  # straight 3-byte
+       |  \xED[\x80-\x9F][\x80-\xBF]        # excluding surrogates
+       |  \xF0[\x90-\xBF][\x80-\xBF]{2}     # planes 1-3
+       | [\xF1-\xF3][\x80-\xBF]{3}          # planes 4-15
+       |  \xF4[\x80-\x8F][\x80-\xBF]{2}     # plane 16
+}x;
+
+our $UTF8      = qr{
+       [\x09\x0A\x0D\x20-\x7E]              # ASCII
+       | $NON_ASCII_UTF8
+}x;
+
+our $typeC99Typedefs = qr{(?:__)?(?:[us]_?)?int_?(?:8|16|32|64)_t};
+our $typeOtherOSTypedefs = qr{(?x:
+       u_(?:char|short|int|long) |          # bsd
+       u(?:nchar|short|int|long)            # sysv
+)};
+our $typeKernelTypedefs = qr{(?x:
+       (?:__)?(?:u|s|be|le)(?:8|16|32|64)|
+       atomic_t
+)};
+our $typeTypedefs = qr{(?x:
+       $typeC99Typedefs\b|
+       $typeOtherOSTypedefs\b|
+       $typeKernelTypedefs\b
+)};
+
+our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b};
+
+our $logFunctions = qr{(?x:
+       printk(?:_ratelimited|_once|)|
+       (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
+       WARN(?:_RATELIMIT|_ONCE|)|
+       panic|
+       MODULE_[A-Z_]+|
+       seq_vprintf|seq_printf|seq_puts
+)};
+
+our $signature_tags = qr{(?xi:
+       Signed-off-by:|
+       Acked-by:|
+       Tested-by:|
+       Reviewed-by:|
+       Reported-by:|
+       Suggested-by:|
+       To:|
+       Cc:
+)};
+
+our @typeListMisordered = (
+       qr{char\s+(?:un)?signed},
+       qr{int\s+(?:(?:un)?signed\s+)?short\s},
+       qr{int\s+short(?:\s+(?:un)?signed)},
+       qr{short\s+int(?:\s+(?:un)?signed)},
+       qr{(?:un)?signed\s+int\s+short},
+       qr{short\s+(?:un)?signed},
+       qr{long\s+int\s+(?:un)?signed},
+       qr{int\s+long\s+(?:un)?signed},
+       qr{long\s+(?:un)?signed\s+int},
+       qr{int\s+(?:un)?signed\s+long},
+       qr{int\s+(?:un)?signed},
+       qr{int\s+long\s+long\s+(?:un)?signed},
+       qr{long\s+long\s+int\s+(?:un)?signed},
+       qr{long\s+long\s+(?:un)?signed\s+int},
+       qr{long\s+long\s+(?:un)?signed},
+       qr{long\s+(?:un)?signed},
+);
+
+our @typeList = (
+       qr{void},
+       qr{(?:(?:un)?signed\s+)?char},
+       qr{(?:(?:un)?signed\s+)?short\s+int},
+       qr{(?:(?:un)?signed\s+)?short},
+       qr{(?:(?:un)?signed\s+)?int},
+       qr{(?:(?:un)?signed\s+)?long\s+int},
+       qr{(?:(?:un)?signed\s+)?long\s+long\s+int},
+       qr{(?:(?:un)?signed\s+)?long\s+long},
+       qr{(?:(?:un)?signed\s+)?long},
+       qr{(?:un)?signed},
+       qr{float},
+       qr{double},
+       qr{bool},
+       qr{struct\s+$Ident},
+       qr{union\s+$Ident},
+       qr{enum\s+$Ident},
+       qr{${Ident}_t},
+       qr{${Ident}_handler},
+       qr{${Ident}_handler_fn},
+       @typeListMisordered,
+);
+
+our $C90_int_types = qr{(?x:
+       long\s+long\s+int\s+(?:un)?signed|
+       long\s+long\s+(?:un)?signed\s+int|
+       long\s+long\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?long\s+long\s+int|
+       (?:(?:un)?signed\s+)?long\s+long|
+       int\s+long\s+long\s+(?:un)?signed|
+       int\s+(?:(?:un)?signed\s+)?long\s+long|
+
+       long\s+int\s+(?:un)?signed|
+       long\s+(?:un)?signed\s+int|
+       long\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?long\s+int|
+       (?:(?:un)?signed\s+)?long|
+       int\s+long\s+(?:un)?signed|
+       int\s+(?:(?:un)?signed\s+)?long|
+
+       int\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?int
+)};
+
+our @typeListFile = ();
+our @typeListWithAttr = (
+       @typeList,
+       qr{struct\s+$InitAttribute\s+$Ident},
+       qr{union\s+$InitAttribute\s+$Ident},
+);
+
+our @modifierList = (
+       qr{fastcall},
+);
+our @modifierListFile = ();
+
+our @mode_permission_funcs = (
+       ["module_param", 3],
+       ["module_param_(?:array|named|string)", 4],
+       ["module_param_array_named", 5],
+       ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
+       ["proc_create(?:_data|)", 2],
+       ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2],
+);
+
+#Create a search pattern for all these functions to speed up a loop below
+our $mode_perms_search = "";
+foreach my $entry (@mode_permission_funcs) {
+       $mode_perms_search .= '|' if ($mode_perms_search ne "");
+       $mode_perms_search .= $entry->[0];
+}
+
+our $mode_perms_world_writable = qr{
+       S_IWUGO         |
+       S_IWOTH         |
+       S_IRWXUGO       |
+       S_IALLUGO       |
+       0[0-7][0-7][2367]
+}x;
+
+our $allowed_asm_includes = qr{(?x:
+       irq|
+       memory|
+       time|
+       reboot
+)};
+# memory.h: ARM has a custom one
+
+# Load common spelling mistakes and build regular expression list.
+my $misspellings;
+my %spelling_fix;
+
+if (open(my $spelling, '<', $spelling_file)) {
+       while (<$spelling>) {
+               my $line = $_;
+
+               $line =~ s/\s*\n?$//g;
+               $line =~ s/^\s*//g;
+
+               next if ($line =~ m/^\s*#/);
+               next if ($line =~ m/^\s*$/);
+
+               my ($suspect, $fix) = split(/\|\|/, $line);
+
+               $spelling_fix{$suspect} = $fix;
+       }
+       close($spelling);
+} else {
+       warn "No typos will be found - file '$spelling_file': $!\n";
+}
+
+if ($codespell) {
+       if (open(my $spelling, '<', $codespellfile)) {
+               while (<$spelling>) {
+                       my $line = $_;
+
+                       $line =~ s/\s*\n?$//g;
+                       $line =~ s/^\s*//g;
+
+                       next if ($line =~ m/^\s*#/);
+                       next if ($line =~ m/^\s*$/);
+                       next if ($line =~ m/, disabled/i);
+
+                       $line =~ s/,.*$//;
+
+                       my ($suspect, $fix) = split(/->/, $line);
+
+                       $spelling_fix{$suspect} = $fix;
+               }
+               close($spelling);
+       } else {
+               warn "No codespell typos will be found - file '$codespellfile': $!\n";
+       }
+}
+
+$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
+
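+# build_types() composes the compound regexes ($Modifier, $BasicType,
+# $NonptrType, $Type, $Declare and their "misordered" variants) from the
+# type and modifier lists above.  It is re-run whenever possible() adds
+# entries to @typeListFile or @modifierListFile so that types discovered
+# in the current file are recognised on later lines.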
+sub build_types {
+       my $mods = "(?x:  \n" . join("|\n  ", (@modifierList, @modifierListFile)) . "\n)";
+       my $all = "(?x:  \n" . join("|\n  ", (@typeList, @typeListFile)) . "\n)";
+       my $Misordered = "(?x:  \n" . join("|\n  ", @typeListMisordered) . "\n)";
+       my $allWithAttr = "(?x:  \n" . join("|\n  ", @typeListWithAttr) . "\n)";
+       $Modifier       = qr{(?:$Attribute|$Sparse|$mods)};
+       $BasicType      = qr{
+                               (?:$typeTypedefs\b)|
+                               (?:${all}\b)
+               }x;
+       $NonptrType     = qr{
+                       (?:$Modifier\s+|const\s+)*
+                       (?:
+                               (?:typeof|__typeof__)\s*\([^\)]*\)|
+                               (?:$typeTypedefs\b)|
+                               (?:${all}\b)
+                       )
+                       (?:\s+$Modifier|\s+const)*
+                 }x;
+       $NonptrTypeMisordered   = qr{
+                       (?:$Modifier\s+|const\s+)*
+                       (?:
+                               (?:${Misordered}\b)
+                       )
+                       (?:\s+$Modifier|\s+const)*
+                 }x;
+       $NonptrTypeWithAttr     = qr{
+                       (?:$Modifier\s+|const\s+)*
+                       (?:
+                               (?:typeof|__typeof__)\s*\([^\)]*\)|
+                               (?:$typeTypedefs\b)|
+                               (?:${allWithAttr}\b)
+                       )
+                       (?:\s+$Modifier|\s+const)*
+                 }x;
+       $Type   = qr{
+                       $NonptrType
+                       (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+)?
+                       (?:\s+$Inline|\s+$Modifier)*
+                 }x;
+       $TypeMisordered = qr{
+                       $NonptrTypeMisordered
+                       (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+)?
+                       (?:\s+$Inline|\s+$Modifier)*
+                 }x;
+       $Declare        = qr{(?:$Storage\s+(?:$Inline\s+)?)?$Type};
+       $DeclareMisordered      = qr{(?:$Storage\s+(?:$Inline\s+)?)?$TypeMisordered};
+}
+build_types();
+
+our $Typecast  = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
+
+# Using $balanced_parens, $LvalOrFunc, or $FuncArg
+# requires at least perl version v5.10.0
+# Any use must be runtime checked with $^V
+
+our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/;
+our $LvalOrFunc        = qr{((?:[\&\*]\s*)?$Lval)\s*($balanced_parens{0,1})\s*};
+our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant|$String)};
+
+our $declaration_macros = qr{(?x:
+       (?:$Storage\s+)?(?:[A-Z_][A-Z0-9]*_){0,2}(?:DEFINE|DECLARE)(?:_[A-Z0-9]+){1,6}\s*\(|
+       (?:$Storage\s+)?LIST_HEAD\s*\(|
+       (?:$Storage\s+)?${Type}\s+uninitialized_var\s*\(
+)};
+
+sub deparenthesize {
+       my ($string) = @_;
+       return "" if (!defined($string));
+
+       while ($string =~ /^\s*\(.*\)\s*$/) {
+               $string =~ s@^\s*\(\s*@@;
+               $string =~ s@\s*\)\s*$@@;
+       }
+
+       $string =~ s@\s+@ @g;
+
+       return $string;
+}
+
+sub seed_camelcase_file {
+       my ($file) = @_;
+
+       return if (!(-f $file));
+
+       local $/;
+
+       open(my $include_file, '<', "$file")
+           or warn "$P: Can't read '$file' $!\n";
+       my $text = <$include_file>;
+       close($include_file);
+
+       my @lines = split('\n', $text);
+
+       foreach my $line (@lines) {
+               next if ($line !~ /(?:[A-Z][a-z]|[a-z][A-Z])/);
+               if ($line =~ /^[ \t]*(?:#[ \t]*define|typedef\s+$Type)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)/) {
+                       $camelcase{$1} = 1;
+               } elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[\(\[,;]/) {
+                       $camelcase{$1} = 1;
+               } elsif ($line =~ /^\s*(?:union|struct|enum)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[;\{]/) {
+                       $camelcase{$1} = 1;
+               }
+       }
+}
+
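+# Seed %camelcase with identifiers defined in the kernel's include/ headers
+# (#defines, typedefs, declarations, struct/union/enum tags).  The result is
+# cached in a .checkpatch-camelcase.* file keyed either to the last git
+# commit touching include/ or to the newest header modification time.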
+my $camelcase_seeded = 0;
+sub seed_camelcase_includes {
+       return if ($camelcase_seeded);
+
+       my $files;
+       my $camelcase_cache = "";
+       my @include_files = ();
+
+       $camelcase_seeded = 1;
+
+       if (-e ".git") {
+               my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
+               chomp $git_last_include_commit;
+               $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
+       } else {
+               my $last_mod_date = 0;
+               $files = `find $root/include -name "*.h"`;
+               @include_files = split('\n', $files);
+               foreach my $file (@include_files) {
+                       my $date = POSIX::strftime("%Y%m%d%H%M",
+                                                  localtime((stat $file)[9]));
+                       $last_mod_date = $date if ($last_mod_date < $date);
+               }
+               $camelcase_cache = ".checkpatch-camelcase.date.$last_mod_date";
+       }
+
+       if ($camelcase_cache ne "" && -f $camelcase_cache) {
+               open(my $camelcase_file, '<', "$camelcase_cache")
+                   or warn "$P: Can't read '$camelcase_cache' $!\n";
+               while (<$camelcase_file>) {
+                       chomp;
+                       $camelcase{$_} = 1;
+               }
+               close($camelcase_file);
+
+               return;
+       }
+
+       if (-e ".git") {
+               $files = `git ls-files "include/*.h"`;
+               @include_files = split('\n', $files);
+       }
+
+       foreach my $file (@include_files) {
+               seed_camelcase_file($file);
+       }
+
+       if ($camelcase_cache ne "") {
+               unlink glob ".checkpatch-camelcase.*";
+               open(my $camelcase_file, '>', "$camelcase_cache")
+                   or warn "$P: Can't write '$camelcase_cache' $!\n";
+               foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) {
+                       print $camelcase_file ("$_\n");
+               }
+               close($camelcase_file);
+       }
+}
+
+sub git_commit_info {
+       my ($commit, $id, $desc) = @_;
+
+       return ($id, $desc) if ((which("git") eq "") || !(-e ".git"));
+
+       my $output = `git log --no-color --format='%H %s' -1 $commit 2>&1`;
+       $output =~ s/^\s*//gm;
+       my @lines = split("\n", $output);
+
+       return ($id, $desc) if ($#lines < 0);
+
+       if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous\./) {
+# Maybe one day convert this block of bash into something that returns
+# all matching commit ids, but it's very slow...
+#
+#              echo "checking commits $1..."
+#              git rev-list --remotes | grep -i "^$1" |
+#              while read line ; do
+#                  git log --format='%H %s' -1 $line |
+#                  echo "commit $(cut -c 1-12,41-)"
+#              done
+       } elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./) {
+       } else {
+               $id = substr($lines[0], 0, 12);
+               $desc = substr($lines[0], 41);
+       }
+
+       return ($id, $desc);
+}
+
+$chk_signoff = 0 if ($file);
+
+my @rawlines = ();
+my @lines = ();
+my @fixed = ();
+my @fixed_inserted = ();
+my @fixed_deleted = ();
+my $fixlinenr = -1;
+
+my $vname;
+for my $filename (@ARGV) {
+       my $FILE;
+       if ($file) {
+               open($FILE, '-|', "diff -u /dev/null $filename") ||
+                       die "$P: $filename: diff failed - $!\n";
+       } elsif ($filename eq '-') {
+               open($FILE, '<&STDIN');
+       } else {
+               open($FILE, '<', "$filename") ||
+                       die "$P: $filename: open failed - $!\n";
+       }
+       if ($filename eq '-') {
+               $vname = 'Your patch';
+       } else {
+               $vname = $filename;
+       }
+       while (<$FILE>) {
+               chomp;
+               push(@rawlines, $_);
+       }
+       close($FILE);
+
+       if ($#ARGV > 0 && $quiet == 0) {
+               print '-' x length($vname) . "\n";
+               print "$vname\n";
+               print '-' x length($vname) . "\n";
+       }
+
+       if (!process($filename)) {
+               $exit = 1;
+       }
+       @rawlines = ();
+       @lines = ();
+       @fixed = ();
+       @fixed_inserted = ();
+       @fixed_deleted = ();
+       $fixlinenr = -1;
+       @modifierListFile = ();
+       @typeListFile = ();
+       build_types();
+}
+
+if (!$quiet) {
+       hash_show_words(\%use_type, "Used");
+       hash_show_words(\%ignore_type, "Ignored");
+
+       if ($^V lt 5.10.0) {
+               print << "EOM"
+
+NOTE: perl $^V is not modern enough to detect all possible issues.
+      An upgrade to at least perl v5.10.0 is suggested.
+EOM
+       }
+       if ($exit) {
+               print << "EOM"
+
+NOTE: If any of the errors are false positives, please report
+      them to the maintainer, see CHECKPATCH in MAINTAINERS.
+EOM
+       }
+}
+
+exit($exit);
+
+sub top_of_kernel_tree {
+       my ($root) = @_;
+
+       my @tree_check = (
+               "COPYING", "CREDITS", "Kbuild", "MAINTAINERS", "Makefile",
+               "README", "Documentation", "arch", "include", "drivers",
+               "fs", "init", "ipc", "kernel", "lib", "scripts",
+       );
+
+       foreach my $check (@tree_check) {
+               if (! -e $root . '/' . $check) {
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+sub parse_email {
+       my ($formatted_email) = @_;
+
+       my $name = "";
+       my $address = "";
+       my $comment = "";
+
+       if ($formatted_email =~ /^(.*)<(\S+\@\S+)>(.*)$/) {
+               $name = $1;
+               $address = $2;
+               $comment = $3 if defined $3;
+       } elsif ($formatted_email =~ /^\s*<(\S+\@\S+)>(.*)$/) {
+               $address = $1;
+               $comment = $2 if defined $2;
+       } elsif ($formatted_email =~ /(\S+\@\S+)(.*)$/) {
+               $address = $1;
+               $comment = $2 if defined $2;
+               $formatted_email =~ s/$address.*$//;
+               $name = $formatted_email;
+               $name = trim($name);
+               $name =~ s/^\"|\"$//g;
+               # If there's a name left after stripping spaces and
+               # leading quotes, and the address doesn't have both
+               # leading and trailing angle brackets, the address
+               # is invalid, i.e.:
+               #   "joe smith joe@smith.com" bad
+               #   "joe smith <joe@smith.com" bad
+               if ($name ne "" && $address !~ /^<[^>]+>$/) {
+                       $name = "";
+                       $address = "";
+                       $comment = "";
+               }
+       }
+
+       $name = trim($name);
+       $name =~ s/^\"|\"$//g;
+       $address = trim($address);
+       $address =~ s/^\<|\>$//g;
+
+       if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
+               $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
+               $name = "\"$name\"";
+       }
+
+       return ($name, $address, $comment);
+}
+
+sub format_email {
+       my ($name, $address) = @_;
+
+       my $formatted_email;
+
+       $name = trim($name);
+       $name =~ s/^\"|\"$//g;
+       $address = trim($address);
+
+       if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
+               $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
+               $name = "\"$name\"";
+       }
+
+       if ("$name" eq "") {
+               $formatted_email = "$address";
+       } else {
+               $formatted_email = "$name <$address>";
+       }
+
+       return $formatted_email;
+}
+
+sub which {
+       my ($bin) = @_;
+
+       foreach my $path (split(/:/, $ENV{PATH})) {
+               if (-e "$path/$bin") {
+                       return "$path/$bin";
+               }
+       }
+
+       return "";
+}
+
+sub which_conf {
+       my ($conf) = @_;
+
+       foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
+               if (-e "$path/$conf") {
+                       return "$path/$conf";
+               }
+       }
+
+       return "";
+}
+
+sub expand_tabs {
+       my ($str) = @_;
+
+       my $res = '';
+       my $n = 0;
+       for my $c (split(//, $str)) {
+               if ($c eq "\t") {
+                       $res .= ' ';
+                       $n++;
+                       for (; ($n % 8) != 0; $n++) {
+                               $res .= ' ';
+                       }
+                       next;
+               }
+               $res .= $c;
+               $n++;
+       }
+
+       return $res;
+}
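+# Return a copy of the string with every non-tab character replaced by a
+# space, preserving tabs, so the result occupies the same columns as the
+# input.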
+sub copy_spacing {
+       (my $res = shift) =~ tr/\t/ /c;
+       return $res;
+}
+
+sub line_stats {
+       my ($line) = @_;
+
+       # Drop the diff line leader and expand tabs
+       $line =~ s/^.//;
+       $line = expand_tabs($line);
+
+       # Pick the indent from the front of the line.
+       my ($white) = ($line =~ /^(\s*)/);
+
+       return (length($line), length($white));
+}
+
+my $sanitise_quote = '';
+
+sub sanitise_line_reset {
+       my ($in_comment) = @_;
+
+       if ($in_comment) {
+               $sanitise_quote = '*/';
+       } else {
+               $sanitise_quote = '';
+       }
+}
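+# Blank out the contents of comments and string/char literals so later
+# checks can match the code structure without tripping over their contents:
+# comment bodies become runs of $;, quoted characters become 'X', and line
+# lengths/offsets are preserved.  (This is why $String above matches
+# "[X\t]*".)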
+sub sanitise_line {
+       my ($line) = @_;
+
+       my $res = '';
+       my $l = '';
+
+       my $qlen = 0;
+       my $off = 0;
+       my $c;
+
+       # Always copy over the diff marker.
+       $res = substr($line, 0, 1);
+
+       for ($off = 1; $off < length($line); $off++) {
+               $c = substr($line, $off, 1);
+
+               # Comments we are whacking completely, including the
+               # beginning and end, all to $;.
+               if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
+                       $sanitise_quote = '*/';
+
+                       substr($res, $off, 2, "$;$;");
+                       $off++;
+                       next;
+               }
+               if ($sanitise_quote eq '*/' && substr($line, $off, 2) eq '*/') {
+                       $sanitise_quote = '';
+                       substr($res, $off, 2, "$;$;");
+                       $off++;
+                       next;
+               }
+               if ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {
+                       $sanitise_quote = '//';
+
+                       substr($res, $off, 2, $sanitise_quote);
+                       $off++;
+                       next;
+               }
+
+               # A \ in a string means ignore the next character.
+               if (($sanitise_quote eq "'" || $sanitise_quote eq '"') &&
+                   $c eq "\\") {
+                       substr($res, $off, 2, 'XX');
+                       $off++;
+                       next;
+               }
+               # Regular quotes.
+               if ($c eq "'" || $c eq '"') {
+                       if ($sanitise_quote eq '') {
+                               $sanitise_quote = $c;
+
+                               substr($res, $off, 1, $c);
+                               next;
+                       } elsif ($sanitise_quote eq $c) {
+                               $sanitise_quote = '';
+                       }
+               }
+
+               #print "c<$c> SQ<$sanitise_quote>\n";
+               if ($off != 0 && $sanitise_quote eq '*/' && $c ne "\t") {
+                       substr($res, $off, 1, $;);
+               } elsif ($off != 0 && $sanitise_quote eq '//' && $c ne "\t") {
+                       substr($res, $off, 1, $;);
+               } elsif ($off != 0 && $sanitise_quote && $c ne "\t") {
+                       substr($res, $off, 1, 'X');
+               } else {
+                       substr($res, $off, 1, $c);
+               }
+       }
+
+       if ($sanitise_quote eq '//') {
+               $sanitise_quote = '';
+       }
+
+       # The pathname on a #include may be surrounded by '<' and '>'.
+       if ($res =~ /^.\s*\#\s*include\s+\<(.*)\>/) {
+               my $clean = 'X' x length($1);
+               $res =~ s@\<.*\>@<$clean>@;
+
+       # The whole of a #error is a string.
+       } elsif ($res =~ /^.\s*\#\s*(?:error|warning)\s+(.*)\b/) {
+               my $clean = 'X' x length($1);
+               $res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@;
+       }
+
+       return $res;
+}
+
+sub get_quoted_string {
+       my ($line, $rawline) = @_;
+
+       return "" if ($line !~ m/($String)/g);
+       return substr($rawline, $-[0], $+[0] - $-[0]);
+}
+
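+# Starting at line $linenr (with $remain hunk lines left) and character
+# offset $off, accumulate lines until the statement ends: a ';' or the
+# closing '}' of a block at the outermost level, or the end of a
+# preprocessor directive.  Returns the full statement, the leading
+# condition (e.g. the "if (...)" part), and the updated line/remain/offset
+# plus the nesting level reached.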
+sub ctx_statement_block {
+       my ($linenr, $remain, $off) = @_;
+       my $line = $linenr - 1;
+       my $blk = '';
+       my $soff = $off;
+       my $coff = $off - 1;
+       my $coff_set = 0;
+
+       my $loff = 0;
+
+       my $type = '';
+       my $level = 0;
+       my @stack = ();
+       my $p;
+       my $c;
+       my $len = 0;
+
+       my $remainder;
+       while (1) {
+               @stack = (['', 0]) if ($#stack == -1);
+
+               #warn "CSB: blk<$blk> remain<$remain>\n";
+               # If we are about to drop off the end, pull in more
+               # context.
+               if ($off >= $len) {
+                       for (; $remain > 0; $line++) {
+                               last if (!defined $lines[$line]);
+                               next if ($lines[$line] =~ /^-/);
+                               $remain--;
+                               $loff = $len;
+                               $blk .= $lines[$line] . "\n";
+                               $len = length($blk);
+                               $line++;
+                               last;
+                       }
+                       # Bail if there is no further context.
+                       #warn "CSB: blk<$blk> off<$off> len<$len>\n";
+                       if ($off >= $len) {
+                               last;
+                       }
+                       if ($level == 0 && substr($blk, $off) =~ /^.\s*#\s*define/) {
+                               $level++;
+                               $type = '#';
+                       }
+               }
+               $p = $c;
+               $c = substr($blk, $off, 1);
+               $remainder = substr($blk, $off);
+
+               #warn "CSB: c<$c> type<$type> level<$level> remainder<$remainder> coff_set<$coff_set>\n";
+
+               # Handle nested #if/#else.
+               if ($remainder =~ /^#\s*(?:ifndef|ifdef|if)\s/) {
+                       push(@stack, [ $type, $level ]);
+               } elsif ($remainder =~ /^#\s*(?:else|elif)\b/) {
+                       ($type, $level) = @{$stack[$#stack - 1]};
+               } elsif ($remainder =~ /^#\s*endif\b/) {
+                       ($type, $level) = @{pop(@stack)};
+               }
+
+               # Statement ends at the ';' or a close '}' at the
+               # outermost level.
+               if ($level == 0 && $c eq ';') {
+                       last;
+               }
+
+               # An else is really a conditional as long as it's not an else if
+               if ($level == 0 && $coff_set == 0 &&
+                               (!defined($p) || $p =~ /(?:\s|\}|\+)/) &&
+                               $remainder =~ /^(else)(?:\s|{)/ &&
+                               $remainder !~ /^else\s+if\b/) {
+                       $coff = $off + length($1) - 1;
+                       $coff_set = 1;
+                       #warn "CSB: mark coff<$coff> soff<$soff> 1<$1>\n";
+                       #warn "[" . substr($blk, $soff, $coff - $soff + 1) . "]\n";
+               }
+
+               if (($type eq '' || $type eq '(') && $c eq '(') {
+                       $level++;
+                       $type = '(';
+               }
+               if ($type eq '(' && $c eq ')') {
+                       $level--;
+                       $type = ($level != 0)? '(' : '';
+
+                       if ($level == 0 && $coff < $soff) {
+                               $coff = $off;
+                               $coff_set = 1;
+                               #warn "CSB: mark coff<$coff>\n";
+                       }
+               }
+               if (($type eq '' || $type eq '{') && $c eq '{') {
+                       $level++;
+                       $type = '{';
+               }
+               if ($type eq '{' && $c eq '}') {
+                       $level--;
+                       $type = ($level != 0)? '{' : '';
+
+                       if ($level == 0) {
+                               if (substr($blk, $off + 1, 1) eq ';') {
+                                       $off++;
+                               }
+                               last;
+                       }
+               }
+               # Preprocessor commands end at the newline unless escaped.
+               if ($type eq '#' && $c eq "\n" && $p ne "\\") {
+                       $level--;
+                       $type = '';
+                       $off++;
+                       last;
+               }
+               $off++;
+       }
+       # We are truly at the end, so shuffle to the next line.
+       if ($off == $len) {
+               $loff = $len + 1;
+               $line++;
+               $remain--;
+       }
+
+       my $statement = substr($blk, $soff, $off - $soff + 1);
+       my $condition = substr($blk, $soff, $coff - $soff + 1);
+
+       #warn "STATEMENT<$statement>\n";
+       #warn "CONDITION<$condition>\n";
+
+       #print "coff<$coff> soff<$off> loff<$loff>\n";
+
+       return ($statement, $condition,
+                       $line, $remain + 1, $off - $loff + 1, $level);
+}
+
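+# Helpers for sizing a statement returned by ctx_statement_block():
+# statement_lines() drops the one-character diff prefix from each line and
+# returns the number of lines, statement_rawlines() counts the raw lines,
+# and statement_block_size() returns the larger of the line count and the
+# number of ';'-terminated statements inside a brace block.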
+sub statement_lines {
+       my ($stmt) = @_;
+
+       # Strip the diff line prefixes and trim blank lines at the start and end.
+       $stmt =~ s/(^|\n)./$1/g;
+       $stmt =~ s/^\s*//;
+       $stmt =~ s/\s*$//;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+
+       return $#stmt_lines + 2;
+}
+
+sub statement_rawlines {
+       my ($stmt) = @_;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+
+       return $#stmt_lines + 2;
+}
+
+sub statement_block_size {
+       my ($stmt) = @_;
+
+       $stmt =~ s/(^|\n)./$1/g;
+       $stmt =~ s/^\s*{//;
+       $stmt =~ s/}\s*$//;
+       $stmt =~ s/^\s*//;
+       $stmt =~ s/\s*$//;
+
+       my @stmt_lines = ($stmt =~ /\n/g);
+       my @stmt_statements = ($stmt =~ /;/g);
+
+       my $stmt_lines = $#stmt_lines + 2;
+       my $stmt_statements = $#stmt_statements + 1;
+
+       if ($stmt_lines > $stmt_statements) {
+               return $stmt_lines;
+       } else {
+               return $stmt_statements;
+       }
+}
+
+sub ctx_statement_full {
+       my ($linenr, $remain, $off) = @_;
+       my ($statement, $condition, $level);
+
+       my (@chunks);
+
+       # Grab the first conditional/block pair.
+       ($statement, $condition, $linenr, $remain, $off, $level) =
+                               ctx_statement_block($linenr, $remain, $off);
+       #print "F: c<$condition> s<$statement> remain<$remain>\n";
+       push(@chunks, [ $condition, $statement ]);
+       if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
+               return ($level, $linenr, @chunks);
+       }
+
+       # Pull in the following conditional/block pairs and see if they
+       # could continue the statement.
+       for (;;) {
+               ($statement, $condition, $linenr, $remain, $off, $level) =
+                               ctx_statement_block($linenr, $remain, $off);
+               #print "C: c<$condition> s<$statement> remain<$remain>\n";
+               last if (!($remain > 0 && $condition =~ /^(?:\s*\n[+-])*\s*(?:else|do)\b/s));
+               #print "C: push\n";
+               push(@chunks, [ $condition, $statement ]);
+       }
+
+       return ($level, $linenr, @chunks);
+}
+
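+# ctx_block_get() and the wrappers below collect the raw lines making up a
+# '{...}' block or '(...)' group starting at $linenr, tracking nesting
+# depth (and #if/#else levels) as they go.  ctx_block_outer() keeps only
+# the lines that are at most one level deep.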
+sub ctx_block_get {
+       my ($linenr, $remain, $outer, $open, $close, $off) = @_;
+       my $line;
+       my $start = $linenr - 1;
+       my $blk = '';
+       my @o;
+       my @c;
+       my @res = ();
+
+       my $level = 0;
+       my @stack = ($level);
+       for ($line = $start; $remain > 0; $line++) {
+               next if ($rawlines[$line] =~ /^-/);
+               $remain--;
+
+               $blk .= $rawlines[$line];
+
+               # Handle nested #if/#else.
+               if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
+                       push(@stack, $level);
+               } elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
+                       $level = $stack[$#stack - 1];
+               } elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
+                       $level = pop(@stack);
+               }
+
+               foreach my $c (split(//, $lines[$line])) {
+                       ##print "C<$c>L<$level><$open$close>O<$off>\n";
+                       if ($off > 0) {
+                               $off--;
+                               next;
+                       }
+
+                       if ($c eq $close && $level > 0) {
+                               $level--;
+                               last if ($level == 0);
+                       } elsif ($c eq $open) {
+                               $level++;
+                       }
+               }
+
+               if (!$outer || $level <= 1) {
+                       push(@res, $rawlines[$line]);
+               }
+
+               last if ($level == 0);
+       }
+
+       return ($level, @res);
+}
+sub ctx_block_outer {
+       my ($linenr, $remain) = @_;
+
+       my ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
+       return @r;
+}
+sub ctx_block {
+       my ($linenr, $remain) = @_;
+
+       my ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
+       return @r;
+}
+sub ctx_statement {
+       my ($linenr, $remain, $off) = @_;
+
+       my ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
+       return @r;
+}
+sub ctx_block_level {
+       my ($linenr, $remain) = @_;
+
+       return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
+}
+sub ctx_statement_level {
+       my ($linenr, $remain, $off) = @_;
+
+       return ctx_block_get($linenr, $remain, 0, '(', ')', $off);
+}
+
+sub ctx_locate_comment {
+       my ($first_line, $end_line) = @_;
+
+       # Catch a comment on the end of the line itself.
+       my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
+       return $current_comment if (defined $current_comment);
+
+       # Look through the context and try to figure out if there is a
+       # comment.
+       my $in_comment = 0;
+       $current_comment = '';
+       for (my $linenr = $first_line; $linenr < $end_line; $linenr++) {
+               my $line = $rawlines[$linenr - 1];
+               #warn "           $line\n";
+               if ($linenr == $first_line and $line =~ m@^.\s*\*@) {
+                       $in_comment = 1;
+               }
+               if ($line =~ m@/\*@) {
+                       $in_comment = 1;
+               }
+               if (!$in_comment && $current_comment ne '') {
+                       $current_comment = '';
+               }
+               $current_comment .= $line . "\n" if ($in_comment);
+               if ($line =~ m@\*/@) {
+                       $in_comment = 0;
+               }
+       }
+
+       chomp($current_comment);
+       return($current_comment);
+}
+sub ctx_has_comment {
+       my ($first_line, $end_line) = @_;
+       my $cmt = ctx_locate_comment($first_line, $end_line);
+
+       ##print "LINE: $rawlines[$end_line - 1 ]\n";
+       ##print "CMMT: $cmt\n";
+
+       return ($cmt ne '');
+}
+
+sub raw_line {
+       my ($linenr, $cnt) = @_;
+
+       my $offset = $linenr - 1;
+       $cnt++;
+
+       my $line;
+       while ($cnt) {
+               $line = $rawlines[$offset++];
+               next if (defined($line) && $line =~ /^-/);
+               $cnt--;
+       }
+
+       return $line;
+}
+
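+# Render a line the way "cat -vet" would: control characters are shown in
+# caret notation (^X) and the end of the line is marked with a '$'.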
+sub cat_vet {
+       my ($vet) = @_;
+       my ($res, $coded);
+
+       $res = '';
+       while ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {
+               $res .= $1;
+               if ($2 ne '') {
+                       $coded = sprintf("^%c", unpack('C', $2) + 64);
+                       $res .= $coded;
+               }
+       }
+       $res =~ s/$/\$/;
+
+       return $res;
+}
+
+my $av_preprocessor = 0;
+my $av_pending;
+my @av_paren_type;
+my $av_pend_colon;
+
+sub annotate_reset {
+       $av_preprocessor = 0;
+       $av_pending = '_';
+       @av_paren_type = ('E');
+       $av_pend_colon = 'O';
+}
+
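+# Walk the sanitised stream character by character and return two strings
+# of the same length: one classifying each position by what the parser
+# expects or has just seen ('E' end of statement/expression, 'N' expecting
+# a value, 'V' after a value or identifier, 'T' after a type, 'c' inside a
+# cast, 'C' after a comma), and one marking whether operators such as '*',
+# '&' and '-' were seen in a binary ('B') or unary ('U') position.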
+sub annotate_values {
+       my ($stream, $type) = @_;
+
+       my $res;
+       my $var = '_' x length($stream);
+       my $cur = $stream;
+
+       print "$stream\n" if ($dbg_values > 1);
+
+       while (length($cur)) {
+               @av_paren_type = ('E') if ($#av_paren_type < 0);
+               print " <" . join('', @av_paren_type) .
+                               "> <$type> <$av_pending>" if ($dbg_values > 1);
+               if ($cur =~ /^(\s+)/o) {
+                       print "WS($1)\n" if ($dbg_values > 1);
+                       if ($1 =~ /\n/ && $av_preprocessor) {
+                               $type = pop(@av_paren_type);
+                               $av_preprocessor = 0;
+                       }
+
+               } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
+                       print "CAST($1)\n" if ($dbg_values > 1);
+                       push(@av_paren_type, $type);
+                       $type = 'c';
+
+               } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
+                       print "DECLARE($1)\n" if ($dbg_values > 1);
+                       $type = 'T';
+
+               } elsif ($cur =~ /^($Modifier)\s*/) {
+                       print "MODIFIER($1)\n" if ($dbg_values > 1);
+                       $type = 'T';
+
+               } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
+                       print "DEFINE($1,$2)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+                       push(@av_paren_type, $type);
+                       if ($2 ne '') {
+                               $av_pending = 'N';
+                       }
+                       $type = 'E';
+
+               } elsif ($cur =~ /^(\#\s*(?:undef\s*$Ident|include\b))/o) {
+                       print "UNDEF($1)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+                       push(@av_paren_type, $type);
+
+               } elsif ($cur =~ /^(\#\s*(?:ifdef|ifndef|if))/o) {
+                       print "PRE_START($1)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+
+                       push(@av_paren_type, $type);
+                       push(@av_paren_type, $type);
+                       $type = 'E';
+
+               } elsif ($cur =~ /^(\#\s*(?:else|elif))/o) {
+                       print "PRE_RESTART($1)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+
+                       push(@av_paren_type, $av_paren_type[$#av_paren_type]);
+
+                       $type = 'E';
+
+               } elsif ($cur =~ /^(\#\s*(?:endif))/o) {
+                       print "PRE_END($1)\n" if ($dbg_values > 1);
+
+                       $av_preprocessor = 1;
+
+                       # Assume all arms of the conditional end as this
+                       # one does, and continue as if the #endif was not here.
+                       pop(@av_paren_type);
+                       push(@av_paren_type, $type);
+                       $type = 'E';
+
+               } elsif ($cur =~ /^(\\\n)/o) {
+                       print "PRECONT($1)\n" if ($dbg_values > 1);
+
+               } elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
+                       print "ATTR($1)\n" if ($dbg_values > 1);
+                       $av_pending = $type;
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
+                       print "SIZEOF($1)\n" if ($dbg_values > 1);
+                       if (defined $2) {
+                               $av_pending = 'V';
+                       }
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(if|while|for)\b/o) {
+                       print "COND($1)\n" if ($dbg_values > 1);
+                       $av_pending = 'E';
+                       $type = 'N';
+
+               } elsif ($cur =~/^(case)/o) {
+                       print "CASE($1)\n" if ($dbg_values > 1);
+                       $av_pend_colon = 'C';
+                       $type = 'N';
+
+               } elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\b/o) {
+                       print "KEYWORD($1)\n" if ($dbg_values > 1);
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(\()/o) {
+                       print "PAREN('$1')\n" if ($dbg_values > 1);
+                       push(@av_paren_type, $av_pending);
+                       $av_pending = '_';
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(\))/o) {
+                       my $new_type = pop(@av_paren_type);
+                       if ($new_type ne '_') {
+                               $type = $new_type;
+                               print "PAREN('$1') -> $type\n"
+                                                       if ($dbg_values > 1);
+                       } else {
+                               print "PAREN('$1')\n" if ($dbg_values > 1);
+                       }
+
+               } elsif ($cur =~ /^($Ident)\s*\(/o) {
+                       print "FUNC($1)\n" if ($dbg_values > 1);
+                       $type = 'V';
+                       $av_pending = 'V';
+
+               } elsif ($cur =~ /^($Ident\s*):(?:\s*\d+\s*(,|=|;))?/) {
+                       if (defined $2 && $type eq 'C' || $type eq 'T') {
+                               $av_pend_colon = 'B';
+                       } elsif ($type eq 'E') {
+                               $av_pend_colon = 'L';
+                       }
+                       print "IDENT_COLON($1,$type>$av_pend_colon)\n" if ($dbg_values > 1);
+                       $type = 'V';
+
+               } elsif ($cur =~ /^($Ident|$Constant)/o) {
+                       print "IDENT($1)\n" if ($dbg_values > 1);
+                       $type = 'V';
+
+               } elsif ($cur =~ /^($Assignment)/o) {
+                       print "ASSIGN($1)\n" if ($dbg_values > 1);
+                       $type = 'N';
+
+               } elsif ($cur =~/^(;|{|})/) {
+                       print "END($1)\n" if ($dbg_values > 1);
+                       $type = 'E';
+                       $av_pend_colon = 'O';
+
+               } elsif ($cur =~/^(,)/) {
+                       print "COMMA($1)\n" if ($dbg_values > 1);
+                       $type = 'C';
+
+               } elsif ($cur =~ /^(\?)/o) {
+                       print "QUESTION($1)\n" if ($dbg_values > 1);
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(:)/o) {
+                       print "COLON($1,$av_pend_colon)\n" if ($dbg_values > 1);
+
+                       substr($var, length($res), 1, $av_pend_colon);
+                       if ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {
+                               $type = 'E';
+                       } else {
+                               $type = 'N';
+                       }
+                       $av_pend_colon = 'O';
+
+               } elsif ($cur =~ /^(\[)/o) {
+                       print "CLOSE($1)\n" if ($dbg_values > 1);
+                       $type = 'N';
+
+               } elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&\&|\&)/o) {
+                       my $variant;
+
+                       print "OPV($1)\n" if ($dbg_values > 1);
+                       if ($type eq 'V') {
+                               $variant = 'B';
+                       } else {
+                               $variant = 'U';
+                       }
+
+                       substr($var, length($res), 1, $variant);
+                       $type = 'N';
+
+               } elsif ($cur =~ /^($Operators)/o) {
+                       print "OP($1)\n" if ($dbg_values > 1);
+                       if ($1 ne '++' && $1 ne '--') {
+                               $type = 'N';
+                       }
+
+               } elsif ($cur =~ /(^.)/o) {
+                       print "C($1)\n" if ($dbg_values > 1);
+               }
+               if (defined $1) {
+                       $cur = substr($cur, length($1));
+                       $res .= $type x length($1);
+               }
+       }
+
+       return ($res, $var);
+}
+
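+# Record an identifier that appears to be used as a type or modifier in
+# the file being checked.  Anything that is not obviously a keyword,
+# storage class or already-known type is pushed onto @typeListFile or
+# @modifierListFile and build_types() is re-run so subsequent lines
+# recognise it.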
+sub possible {
+       my ($possible, $line) = @_;
+       my $notPermitted = qr{(?:
+               ^(?:
+                       $Modifier|
+                       $Storage|
+                       $Type|
+                       DEFINE_\S+
+               )$|
+               ^(?:
+                       goto|
+                       return|
+                       case|
+                       else|
+                       asm|__asm__|
+                       do|
+                       \#|
+                       \#\#|
+               )(?:\s|$)|
+               ^(?:typedef|struct|enum)\b
+           )}x;
+       warn "CHECK<$possible> ($line)\n" if ($dbg_possible > 2);
+       if ($possible !~ $notPermitted) {
+               # Check for modifiers.
+               $possible =~ s/\s*$Storage\s*//g;
+               $possible =~ s/\s*$Sparse\s*//g;
+               if ($possible =~ /^\s*$/) {
+
+               } elsif ($possible =~ /\s/) {
+                       $possible =~ s/\s*$Type\s*//g;
+                       for my $modifier (split(' ', $possible)) {
+                               if ($modifier !~ $notPermitted) {
+                                       warn "MODIFIER: $modifier ($possible) ($line)\n" if ($dbg_possible);
+                                       push(@modifierListFile, $modifier);
+                               }
+                       }
+
+               } else {
+                       warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
+                       push(@typeListFile, $possible);
+               }
+               build_types();
+       } else {
+               warn "NOTPOSS: $possible ($line)\n" if ($dbg_possible > 1);
+       }
+}
+
+my $prefix = '';
+
+sub show_type {
+       my ($type) = @_;
+
+       return defined $use_type{$type} if (scalar keys %use_type > 0);
+
+       return !defined $ignore_type{$type};
+}
+
+sub report {
+       my ($level, $type, $msg) = @_;
+
+       if (!show_type($type) ||
+           (defined $tst_only && $msg !~ /\Q$tst_only\E/)) {
+               return 0;
+       }
+       my $output = '';
+       if (-t STDOUT && $color) {
+               if ($level eq 'ERROR') {
+                       $output .= RED;
+               } elsif ($level eq 'WARNING') {
+                       $output .= YELLOW;
+               } else {
+                       $output .= GREEN;
+               }
+       }
+       $output .= $prefix . $level . ':';
+       if ($show_types) {
+               $output .= BLUE if (-t STDOUT && $color);
+               $output .= "$type:";
+       }
+       $output .= RESET if (-t STDOUT && $color);
+       $output .= ' ' . $msg . "\n";
+
+       if ($showfile) {
+               my @lines = split("\n", $output, -1);
+               splice(@lines, 1, 1);
+               $output = join("\n", @lines);
+       }
+       $output = (split('\n', $output))[0] . "\n" if ($terse);
+
+       push(our @report, $output);
+
+       return 1;
+}
+
+sub report_dump {
+       our @report;
+}
+
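+# Adjust the "+start,count" half of a unified diff hunk header by $offset
+# and $length to account for lines that --fix has inserted or deleted, so
+# the fixed patch still applies.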
+sub fixup_current_range {
+       my ($lineRef, $offset, $length) = @_;
+
+       if ($$lineRef =~ /^\@\@ -\d+,\d+ \+(\d+),(\d+) \@\@/) {
+               my $o = $1;
+               my $l = $2;
+               my $no = $o + $offset;
+               my $nl = $l + $length;
+               $$lineRef =~ s/\+$o,$l \@\@/\+$no,$nl \@\@/;
+       }
+}
+
+sub fix_inserted_deleted_lines {
+       my ($linesRef, $insertedRef, $deletedRef) = @_;
+
+       my $range_last_linenr = 0;
+       my $delta_offset = 0;
+
+       my $old_linenr = 0;
+       my $new_linenr = 0;
+
+       my $next_insert = 0;
+       my $next_delete = 0;
+
+       my @lines = ();
+
+       my $inserted = @{$insertedRef}[$next_insert++];
+       my $deleted = @{$deletedRef}[$next_delete++];
+
+       foreach my $old_line (@{$linesRef}) {
+               my $save_line = 1;
+               my $line = $old_line;   #don't modify the array
+               if ($line =~ /^(?:\+\+\+|\-\-\-)\s+\S+/) {      #new filename
+                       $delta_offset = 0;
+               } elsif ($line =~ /^\@\@ -\d+,\d+ \+\d+,\d+ \@\@/) {    #new hunk
+                       $range_last_linenr = $new_linenr;
+                       fixup_current_range(\$line, $delta_offset, 0);
+               }
+
+               while (defined($deleted) && ${$deleted}{'LINENR'} == $old_linenr) {
+                       $deleted = @{$deletedRef}[$next_delete++];
+                       $save_line = 0;
+                       fixup_current_range(\$lines[$range_last_linenr], $delta_offset--, -1);
+               }
+
+               while (defined($inserted) && ${$inserted}{'LINENR'} == $old_linenr) {
+                       push(@lines, ${$inserted}{'LINE'});
+                       $inserted = @{$insertedRef}[$next_insert++];
+                       $new_linenr++;
+                       fixup_current_range(\$lines[$range_last_linenr], $delta_offset++, 1);
+               }
+
+               if ($save_line) {
+                       push(@lines, $line);
+                       $new_linenr++;
+               }
+
+               $old_linenr++;
+       }
+
+       return @lines;
+}
+
+sub fix_insert_line {
+       my ($linenr, $line) = @_;
+
+       my $inserted = {
+               LINENR => $linenr,
+               LINE => $line,
+       };
+       push(@fixed_inserted, $inserted);
+}
+
+sub fix_delete_line {
+       my ($linenr, $line) = @_;
+
+       my $deleted = {
+               LINENR => $linenr,
+               LINE => $line,
+       };
+
+       push(@fixed_deleted, $deleted);
+}
+
+sub ERROR {
+       my ($type, $msg) = @_;
+
+       if (report("ERROR", $type, $msg)) {
+               our $clean = 0;
+               our $cnt_error++;
+               return 1;
+       }
+       return 0;
+}
+sub WARN {
+       my ($type, $msg) = @_;
+
+       if (report("WARNING", $type, $msg)) {
+               our $clean = 0;
+               our $cnt_warn++;
+               return 1;
+       }
+       return 0;
+}
+sub CHK {
+       my ($type, $msg) = @_;
+
+       if ($check && report("CHECK", $type, $msg)) {
+               our $clean = 0;
+               our $cnt_chk++;
+               return 1;
+       }
+       return 0;
+}
+
+sub check_absolute_file {
+       my ($absolute, $herecurr) = @_;
+       my $file = $absolute;
+
+       ##print "absolute<$absolute>\n";
+
+       # See if any suffix of this path is a path within the tree.
+       while ($file =~ s@^[^/]*/@@) {
+               if (-f "$root/$file") {
+                       ##print "file<$file>\n";
+                       last;
+               }
+       }
+       if (! -f _)  {
+               return 0;
+       }
+
+       # It is, so see if the prefix is acceptable.
+       my $prefix = $absolute;
+       substr($prefix, -length($file)) = '';
+
+       ##print "prefix<$prefix>\n";
+       if ($prefix ne ".../") {
+               WARN("USE_RELATIVE_PATH",
+                    "use relative pathname instead of absolute in changelog text\n" . $herecurr);
+       }
+}
+
+sub trim {
+       my ($string) = @_;
+
+       $string =~ s/^\s+|\s+$//g;
+
+       return $string;
+}
+
+sub ltrim {
+       my ($string) = @_;
+
+       $string =~ s/^\s+//;
+
+       return $string;
+}
+
+sub rtrim {
+       my ($string) = @_;
+
+       $string =~ s/\s+$//;
+
+       return $string;
+}
+
+sub string_find_replace {
+       my ($string, $find, $replace) = @_;
+
+       $string =~ s/$find/$replace/g;
+
+       return $string;
+}
+
+sub tabify {
+       my ($leading) = @_;
+
+       my $source_indent = 8;
+       my $max_spaces_before_tab = $source_indent - 1;
+       my $spaces_to_tab = " " x $source_indent;
+
+       #convert leading spaces to tabs
+       1 while $leading =~ s@^([\t]*)$spaces_to_tab@$1\t@g;
+       #Remove spaces before a tab
+       1 while $leading =~ s@^([\t]*)( {1,$max_spaces_before_tab})\t@$1\t@g;
+
+       return "$leading";
+}
+
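+# Return the column (1-based, after tab expansion) of the last unclosed
+# '(' on the line, or -1 if there is none (no '(' at all, or every group
+# is balanced).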
+sub pos_last_openparen {
+       my ($line) = @_;
+
+       my $pos = 0;
+
+       my $opens = $line =~ tr/\(/\(/;
+       my $closes = $line =~ tr/\)/\)/;
+
+       my $last_openparen = 0;
+
+       if (($opens == 0) || ($closes >= $opens)) {
+               return -1;
+       }
+
+       my $len = length($line);
+
+       for ($pos = 0; $pos < $len; $pos++) {
+               my $string = substr($line, $pos);
+               if ($string =~ /^($FuncArg|$balanced_parens)/) {
+                       $pos += length($1) - 1;
+               } elsif (substr($line, $pos, 1) eq '(') {
+                       $last_openparen = $pos;
+               } elsif (index($string, '(') == -1) {
+                       last;
+               }
+       }
+
+       return length(expand_tabs(substr($line, 0, $last_openparen))) + 1;
+}
+
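+# process() is the main worker: it takes one input (a patch, or with -f a
+# whole file rendered as a diff against /dev/null), pre-scans and sanitises
+# the lines, then runs the checks.  The caller above treats a false return
+# as "issues found" and sets the exit code accordingly.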
+sub process {
+       my $filename = shift;
+
+       my $linenr=0;
+       my $prevline="";
+       my $prevrawline="";
+       my $stashline="";
+       my $stashrawline="";
+
+       my $length;
+       my $indent;
+       my $previndent=0;
+       my $stashindent=0;
+
+       our $clean = 1;
+       my $signoff = 0;
+       my $is_patch = 0;
+       my $in_header_lines = $file ? 0 : 1;
+       my $in_commit_log = 0;          #Scanning lines before patch
+       my $commit_log_possible_stack_dump = 0;
+       my $commit_log_long_line = 0;
+       my $commit_log_has_diff = 0;
+       my $reported_maintainer_file = 0;
+       my $non_utf8_charset = 0;
+
+       my $last_blank_line = 0;
+       my $last_coalesced_string_linenr = -1;
+
+       our @report = ();
+       our $cnt_lines = 0;
+       our $cnt_error = 0;
+       our $cnt_warn = 0;
+       our $cnt_chk = 0;
+
+       # Trace the real file/line as we go.
+       my $realfile = '';
+       my $realline = 0;
+       my $realcnt = 0;
+       my $here = '';
+       my $in_comment = 0;
+       my $comment_edge = 0;
+       my $first_line = 0;
+       my $p1_prefix = '';
+
+       my $prev_values = 'E';
+
+       # suppression flags
+       my %suppress_ifbraces;
+       my %suppress_whiletrailers;
+       my %suppress_export;
+       my $suppress_statement = 0;
+
+       my %signatures = ();
+
+       # Pre-scan the patch, sanitizing the lines and looking for any
+       # __setup documentation.
+       #
+       my @setup_docs = ();
+       my $setup_docs = 0;
+
+       my $camelcase_file_seeded = 0;
+
+       sanitise_line_reset();
+       my $line;
+       foreach my $rawline (@rawlines) {
+               $linenr++;
+               $line = $rawline;
+
+               push(@fixed, $rawline) if ($fix);
+
+               if ($rawline=~/^\+\+\+\s+(\S+)/) {
+                       $setup_docs = 0;
+                       if ($1 =~ m@Documentation/kernel-parameters.txt$@) {
+                               $setup_docs = 1;
+                       }
+                       #next;
+               }
+               if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+                       $realline=$1-1;
+                       if (defined $2) {
+                               $realcnt=$3+1;
+                       } else {
+                               $realcnt=1+1;
+                       }
+                       $in_comment = 0;
+
+                       # Guesstimate if this is a continuing comment.  Run
+                       # the context looking for a comment "edge".  If this
+                       # edge is a close comment then we must be in a comment
+                       # at context start.
+                       my $edge;
+                       my $cnt = $realcnt;
+                       for (my $ln = $linenr + 1; $cnt > 0; $ln++) {
+                               next if (defined $rawlines[$ln - 1] &&
+                                        $rawlines[$ln - 1] =~ /^-/);
+                               $cnt--;
+                               #print "RAW<$rawlines[$ln - 1]>\n";
+                               last if (!defined $rawlines[$ln - 1]);
+                               if ($rawlines[$ln - 1] =~ m@(/\*|\*/)@ &&
+                                   $rawlines[$ln - 1] !~ m@"[^"]*(?:/\*|\*/)[^"]*"@) {
+                                       ($edge) = $1;
+                                       last;
+                               }
+                       }
+                       if (defined $edge && $edge eq '*/') {
+                               $in_comment = 1;
+                       }
+
+                       # Guestimate if this is a continuing comment.  If this
+                       # is the start of a diff block and this line starts
+                       # ' *' then it is very likely a comment.
+                       if (!defined $edge &&
+                           $rawlines[$linenr] =~ m@^.\s*(?:\*\*+| \*)(?:\s|$)@)
+                       {
+                               $in_comment = 1;
+                       }
+
+                       ##print "COMMENT:$in_comment edge<$edge> $rawline\n";
+                       sanitise_line_reset($in_comment);
+
+               } elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
+                       # Standardise the strings and chars within the input to
+                       # simplify matching -- only bother with positive lines.
+                       $line = sanitise_line($rawline);
+               }
+               push(@lines, $line);
+
+               if ($realcnt > 1) {
+                       $realcnt-- if ($line =~ /^(?:\+| |$)/);
+               } else {
+                       $realcnt = 0;
+               }
+
+               #print "==>$rawline\n";
+               #print "-->$line\n";
+
+               if ($setup_docs && $line =~ /^\+/) {
+                       push(@setup_docs, $line);
+               }
+       }
+
+       $prefix = '';
+
+       $realcnt = 0;
+       $linenr = 0;
+       $fixlinenr = -1;
+       foreach my $line (@lines) {
+               $linenr++;
+               $fixlinenr++;
+               my $sline = $line;      #copy of $line
+               $sline =~ s/$;/ /g;     #with comments as spaces
+
+               my $rawline = $rawlines[$linenr - 1];
+
+#extract the line range in the file after the patch is applied
+               if (!$in_commit_log &&
+                   $line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+                       $is_patch = 1;
+                       $first_line = $linenr + 1;
+                       $realline=$1-1;
+                       if (defined $2) {
+                               $realcnt=$3+1;
+                       } else {
+                               $realcnt=1+1;
+                       }
+                       annotate_reset();
+                       $prev_values = 'E';
+
+                       %suppress_ifbraces = ();
+                       %suppress_whiletrailers = ();
+                       %suppress_export = ();
+                       $suppress_statement = 0;
+                       next;
+
+# track the line number as we move through the hunk; note that
+# new versions of GNU diff omit the leading space on completely
+# blank context lines, so we need to count those too.
+               } elsif ($line =~ /^( |\+|$)/) {
+                       $realline++;
+                       $realcnt-- if ($realcnt != 0);
+
+                       # Measure the line length and indent.
+                       ($length, $indent) = line_stats($rawline);
+
+                       # Track the previous line.
+                       ($prevline, $stashline) = ($stashline, $line);
+                       ($previndent, $stashindent) = ($stashindent, $indent);
+                       ($prevrawline, $stashrawline) = ($stashrawline, $rawline);
+
+                       #warn "line<$line>\n";
+
+               } elsif ($realcnt == 1) {
+                       $realcnt--;
+               }
+
+               my $hunk_line = ($realcnt != 0);
+
+               $here = "#$linenr: " if (!$file);
+               $here = "#$realline: " if ($file);
+
+               my $found_file = 0;
+               # extract the filename as it passes
+               if ($line =~ /^diff --git.*?(\S+)$/) {
+                       $realfile = $1;
+                       $realfile =~ s@^([^/]*)/@@ if (!$file);
+                       $in_commit_log = 0;
+                       $found_file = 1;
+               } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
+                       $realfile = $1;
+                       $realfile =~ s@^([^/]*)/@@ if (!$file);
+                       $in_commit_log = 0;
+
+                       $p1_prefix = $1;
+                       if (!$file && $tree && $p1_prefix ne '' &&
+                           -e "$root/$p1_prefix") {
+                               WARN("PATCH_PREFIX",
+                                    "patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
+                       }
+
+                       if ($realfile =~ m@^include/asm/@) {
+                               ERROR("MODIFIED_INCLUDE_ASM",
+                                     "do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n");
+                       }
+                       $found_file = 1;
+               }
+
+#make up the handle for any error we report on this line
+               if ($showfile) {
+                       $prefix = "$realfile:$realline: "
+               } elsif ($emacs) {
+                       if ($file) {
+                               $prefix = "$filename:$realline: ";
+                       } else {
+                               $prefix = "$filename:$linenr: ";
+                       }
+               }
+
+               if ($found_file) {
+                       if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) {
+                               $check = 1;
+                       } else {
+                               $check = $check_orig;
+                       }
+                       next;
+               }
+
+               $here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
+
+               my $hereline = "$here\n$rawline\n";
+               my $herecurr = "$here\n$rawline\n";
+               my $hereprev = "$here\n$prevrawline\n$rawline\n";
+
+               $cnt_lines++ if ($realcnt != 0);
+
+# Check if the commit log has what seems like a diff which can confuse patch
+               if ($in_commit_log && !$commit_log_has_diff &&
+                   (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
+                     $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+                    $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
+                    $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
+                       ERROR("DIFF_IN_COMMIT_MSG",
+                             "Avoid using diff content in the commit message - patch(1) might not work\n" . $herecurr);
+                       $commit_log_has_diff = 1;
+               }
+
+# Check for incorrect file permissions
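+# e.g. a "new file mode 100755" hunk for a .c file is reported here, while
+# executable modes under scripts/ or on .sh/.pl/.py/.awk files are allowed.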
+               if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
+                       my $permhere = $here . "FILE: $realfile\n";
+                       if ($realfile !~ m@scripts/@ &&
+                           $realfile !~ /\.(py|pl|awk|sh)$/) {
+                               ERROR("EXECUTE_PERMISSIONS",
+                                     "do not set execute permissions for source files\n" . $permhere);
+                       }
+               }
+
+# Check the patch for a signoff:
+               if ($line =~ /^\s*signed-off-by:/i) {
+                       $signoff++;
+                       $in_commit_log = 0;
+               }
+
+# Check if MAINTAINERS is being updated.  If so, there's probably no need to
+# emit the "does MAINTAINERS need updating?" message on file add/move/delete
+               if ($line =~ /^\s*MAINTAINERS\s*\|/) {
+                       $reported_maintainer_file = 1;
+               }
+
+# Check signature styles
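+# e.g. a tag written as "signed-off-by:" is flagged below; the preferred form
+# is "Signed-off-by:" with no leading whitespace and exactly one space before
+# the name and email address.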
+               if (!$in_header_lines &&
+                   $line =~ /^(\s*)([a-z0-9_-]+by:|$signature_tags)(\s*)(.*)/i) {
+                       my $space_before = $1;
+                       my $sign_off = $2;
+                       my $space_after = $3;
+                       my $email = $4;
+                       my $ucfirst_sign_off = ucfirst(lc($sign_off));
+
+                       if ($sign_off !~ /$signature_tags/) {
+                               WARN("BAD_SIGN_OFF",
+                                    "Non-standard signature: $sign_off\n" . $herecurr);
+                       }
+                       if (defined $space_before && $space_before ne "") {
+                               if (WARN("BAD_SIGN_OFF",
+                                        "Do not use whitespace before $ucfirst_sign_off\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =
+                                           "$ucfirst_sign_off $email";
+                               }
+                       }
+                       if ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) {
+                               if (WARN("BAD_SIGN_OFF",
+                                        "'$ucfirst_sign_off' is the preferred signature form\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =
+                                           "$ucfirst_sign_off $email";
+                               }
+
+                       }
+                       if (!defined $space_after || $space_after ne " ") {
+                               if (WARN("BAD_SIGN_OFF",
+                                        "Use a single space after $ucfirst_sign_off\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =
+                                           "$ucfirst_sign_off $email";
+                               }
+                       }
+
+                       my ($email_name, $email_address, $comment) = parse_email($email);
+                       my $suggested_email = format_email(($email_name, $email_address));
+                       if ($suggested_email eq "") {
+                               ERROR("BAD_SIGN_OFF",
+                                     "Unrecognized email address: '$email'\n" . $herecurr);
+                       } else {
+                               my $dequoted = $suggested_email;
+                               $dequoted =~ s/^"//;
+                               $dequoted =~ s/" </ </;
+                               # Don't force email to have quotes
+                               # Allow just an angle bracketed address
+                               if ("$dequoted$comment" ne $email &&
+                                   "<$email_address>$comment" ne $email &&
+                                   "$suggested_email$comment" ne $email) {
+                                       WARN("BAD_SIGN_OFF",
+                                            "email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
+                               }
+                       }
+
+# Check for duplicate signatures
+                       my $sig_nospace = $line;
+                       $sig_nospace =~ s/\s//g;
+                       $sig_nospace = lc($sig_nospace);
+                       if (defined $signatures{$sig_nospace}) {
+                               WARN("BAD_SIGN_OFF",
+                                    "Duplicate signature\n" . $herecurr);
+                       } else {
+                               $signatures{$sig_nospace} = 1;
+                       }
+               }
+
+# Check email subject for common tools that don't need to be mentioned
+               if ($in_header_lines &&
+                   $line =~ /^Subject:.*\b(?:checkpatch|sparse|smatch)\b[^:]/i) {
+                       WARN("EMAIL_SUBJECT",
+                            "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
+               }
+
+# Check for old stable address
+               if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
+                       ERROR("STABLE_ADDRESS",
+                             "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
+               }
+
+# Check for unwanted Gerrit info
+               if ($in_commit_log && $line =~ /^\s*change-id:/i) {
+                       ERROR("GERRIT_CHANGE_ID",
+                             "Remove Gerrit Change-Id's before submitting upstream.\n" . $herecurr);
+               }
+
+# Check if the commit log is in a possible stack dump
+               if ($in_commit_log && !$commit_log_possible_stack_dump &&
+                   ($line =~ /^\s*(?:WARNING:|BUG:)/ ||
+                    $line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ ||
+                                       # timestamp
+                    $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/)) {
+                                       # stack dump address
+                       $commit_log_possible_stack_dump = 1;
+               }
+
+# Check for line lengths > 75 in commit log, warn once
+               if ($in_commit_log && !$commit_log_long_line &&
+                   length($line) > 75 &&
+                   !($line =~ /^\s*[a-zA-Z0-9_\/\.]+\s+\|\s+\d+/ ||
+                                       # file delta changes
+                     $line =~ /^\s*(?:[\w\.\-]+\/)++[\w\.\-]+:/ ||
+                                       # filename then :
+                     $line =~ /^\s*(?:Fixes:|Link:)/i ||
+                                       # A Fixes: or Link: line
+                     $commit_log_possible_stack_dump)) {
+                       WARN("COMMIT_LOG_LONG_LINE",
+                            "Possible unwrapped commit description (prefer a maximum 75 chars per line)\n" . $herecurr);
+                       $commit_log_long_line = 1;
+               }
+
+# Reset possible stack dump if a blank line is found
+               if ($in_commit_log && $commit_log_possible_stack_dump &&
+                   $line =~ /^\s*$/) {
+                       $commit_log_possible_stack_dump = 0;
+               }
+
+# Check for git id commit length and improperly formed commit descriptions
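+# The preferred reference style checked below is, for example:
+#   commit 0123456789ab ("commit description")
+# i.e. at least 12 characters of the sha1 followed by the quoted subject line
+# in parentheses.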
+               if ($in_commit_log && !$commit_log_possible_stack_dump &&
+                   ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
+                    ($line =~ /\b[0-9a-f]{12,40}\b/i &&
+                     $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
+                     $line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) {
+                       my $init_char = "c";
+                       my $orig_commit = "";
+                       my $short = 1;
+                       my $long = 0;
+                       my $case = 1;
+                       my $space = 1;
+                       my $hasdesc = 0;
+                       my $hasparens = 0;
+                       my $id = '0123456789ab';
+                       my $orig_desc = "commit description";
+                       my $description = "";
+
+                       if ($line =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) {
+                               $init_char = $1;
+                               $orig_commit = lc($2);
+                       } elsif ($line =~ /\b([0-9a-f]{12,40})\b/i) {
+                               $orig_commit = lc($1);
+                       }
+
+                       $short = 0 if ($line =~ /\bcommit\s+[0-9a-f]{12,40}/i);
+                       $long = 1 if ($line =~ /\bcommit\s+[0-9a-f]{41,}/i);
+                       $space = 0 if ($line =~ /\bcommit [0-9a-f]/i);
+                       $case = 0 if ($line =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/);
+                       if ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)"\)/i) {
+                               $orig_desc = $1;
+                               $hasparens = 1;
+                       } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s*$/i &&
+                                defined $rawlines[$linenr] &&
+                                $rawlines[$linenr] =~ /^\s*\("([^"]+)"\)/) {
+                               $orig_desc = $1;
+                               $hasparens = 1;
+                       } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("[^"]+$/i &&
+                                defined $rawlines[$linenr] &&
+                                $rawlines[$linenr] =~ /^\s*[^"]+"\)/) {
+                               $line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)$/i;
+                               $orig_desc = $1;
+                               $rawlines[$linenr] =~ /^\s*([^"]+)"\)/;
+                               $orig_desc .= " " . $1;
+                               $hasparens = 1;
+                       }
+
+                       ($id, $description) = git_commit_info($orig_commit,
+                                                             $id, $orig_desc);
+
+                       if ($short || $long || $space || $case || ($orig_desc ne $description) || !$hasparens) {
+                               ERROR("GIT_COMMIT_ID",
+                                     "Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herecurr);
+                       }
+               }
+
+# Check for added, moved or deleted files
+               if (!$reported_maintainer_file && !$in_commit_log &&
+                   ($line =~ /^(?:new|deleted) file mode\s*\d+\s*$/ ||
+                    $line =~ /^rename (?:from|to) [\w\/\.\-]+\s*$/ ||
+                    ($line =~ /\{\s*([\w\/\.\-]*)\s*\=\>\s*([\w\/\.\-]*)\s*\}/ &&
+                     (defined($1) || defined($2))))) {
+                       $reported_maintainer_file = 1;
+                       WARN("FILE_PATH_CHANGES",
+                            "added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr);
+               }
+
+# Check for wrappage within a valid hunk of the file
+               if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
+                       ERROR("CORRUPTED_PATCH",
+                             "patch seems to be corrupt (line wrapped?)\n" .
+                               $herecurr) if (!$emitted_corrupt++);
+               }
+
+# Check for absolute kernel paths.
+               if ($tree) {
+                       while ($line =~ m{(?:^|\s)(/\S*)}g) {
+                               my $file = $1;
+
+                               if ($file =~ m{^(.*?)(?::\d+)+:?$} &&
+                                   check_absolute_file($1, $herecurr)) {
+                                       #
+                               } else {
+                                       check_absolute_file($file, $herecurr);
+                               }
+                       }
+               }
+
+# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
+               if (($realfile =~ /^$/ || $line =~ /^\+/) &&
+                   $rawline !~ m/^$UTF8*$/) {
+                       my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
+
+                       my $blank = copy_spacing($rawline);
+                       my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
+                       my $hereptr = "$hereline$ptr\n";
+
+                       CHK("INVALID_UTF8",
+                           "Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
+               }
+
+# Check if it's the start of a commit log
+# (not a header line and we haven't seen the patch filename)
+               if ($in_header_lines && $realfile =~ /^$/ &&
+                   !($rawline =~ /^\s+\S/ ||
+                     $rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) {
+                       $in_header_lines = 0;
+                       $in_commit_log = 1;
+               }
+
+# Check if there is UTF-8 in a commit log when a mail header has explicitly
+# declined it, i.e. declared some charset other than UTF-8.
+               if ($in_header_lines &&
+                   $rawline =~ /^Content-Type:.+charset="(.+)".*$/ &&
+                   $1 !~ /utf-8/i) {
+                       $non_utf8_charset = 1;
+               }
+
+               if ($in_commit_log && $non_utf8_charset && $realfile =~ /^$/ &&
+                   $rawline =~ /$NON_ASCII_UTF8/) {
+                       WARN("UTF8_BEFORE_PATCH",
+                           "8-bit UTF-8 used in possible commit log\n" . $herecurr);
+               }
+
+# Check for various typo / spelling mistakes
+               if (defined($misspellings) &&
+                   ($in_commit_log || $line =~ /^(?:\+|Subject:)/i)) {
+                       while ($rawline =~ /(?:^|[^a-z@])($misspellings)(?:\b|$|[^a-z@])/gi) {
+                               my $typo = $1;
+                               my $typo_fix = $spelling_fix{lc($typo)};
+                               $typo_fix = ucfirst($typo_fix) if ($typo =~ /^[A-Z]/);
+                               $typo_fix = uc($typo_fix) if ($typo =~ /^[A-Z]+$/);
+                               my $msg_type = \&WARN;
+                               $msg_type = \&CHK if ($file);
+                               if (&{$msg_type}("TYPO_SPELLING",
+                                                "'$typo' may be misspelled - perhaps '$typo_fix'?\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/(^|[^A-Za-z@])($typo)($|[^A-Za-z@])/$1$typo_fix$3/;
+                               }
+                       }
+               }
+
+# ignore non-hunk lines and lines being removed
+               next if (!$hunk_line || $line =~ /^-/);
+
+#trailing whitespace
+               if ($line =~ /^\+.*\015/) {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       if (ERROR("DOS_LINE_ENDINGS",
+                                 "DOS line endings\n" . $herevet) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/[\s\015]+$//;
+                       }
+               } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       if (ERROR("TRAILING_WHITESPACE",
+                                 "trailing whitespace\n" . $herevet) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\s+$//;
+                       }
+
+                       $rpt_cleaners = 1;
+               }
+
+# Check for FSF mailing addresses.
+               if ($rawline =~ /\bwrite to the Free/i ||
+                   $rawline =~ /\b59\s+Temple\s+Pl/i ||
+                   $rawline =~ /\b51\s+Franklin\s+St/i) {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       my $msg_type = \&ERROR;
+                       $msg_type = \&CHK if ($file);
+                       &{$msg_type}("FSF_MAILING_ADDRESS",
+                                    "Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet)
+               }
+
+# check for Kconfig help text having a real description
+# Only applies when adding the entry originally; after that we do not have
+# sufficient context to determine whether it is indeed long enough.
+               if ($realfile =~ /Kconfig/ &&
+                   $line =~ /^\+\s*config\s+/) {
+                       my $length = 0;
+                       my $cnt = $realcnt;
+                       my $ln = $linenr + 1;
+                       my $f;
+                       my $is_start = 0;
+                       my $is_end = 0;
+                       for (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {
+                               $f = $lines[$ln - 1];
+                               $cnt-- if ($lines[$ln - 1] !~ /^-/);
+                               $is_end = $lines[$ln - 1] =~ /^\+/;
+
+                               next if ($f =~ /^-/);
+                               last if (!$file && $f =~ /^\@\@/);
+
+                               if ($lines[$ln - 1] =~ /^\+\s*(?:bool|tristate)\s*\"/) {
+                                       $is_start = 1;
+                               } elsif ($lines[$ln - 1] =~ /^\+\s*(?:---)?help(?:---)?$/) {
+                                       $length = -1;
+                               }
+
+                               $f =~ s/^.//;
+                               $f =~ s/#.*//;
+                               $f =~ s/^\s+//;
+                               next if ($f =~ /^$/);
+                               if ($f =~ /^\s*config\s/) {
+                                       $is_end = 1;
+                                       last;
+                               }
+                               $length++;
+                       }
+                       if ($is_start && $is_end && $length < $min_conf_desc_length) {
+                               WARN("CONFIG_DESCRIPTION",
+                                    "please write a paragraph that describes the config symbol fully\n" . $herecurr);
+                       }
+                       #print "is_start<$is_start> is_end<$is_end> length<$length>\n";
+               }
+
+# discourage the addition of CONFIG_EXPERIMENTAL in Kconfig.
+               if ($realfile =~ /Kconfig/ &&
+                   $line =~ /.\s*depends on\s+.*\bEXPERIMENTAL\b/) {
+                       WARN("CONFIG_EXPERIMENTAL",
+                            "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
+               }
+
+# discourage the use of boolean for type definition attributes of Kconfig options
+               if ($realfile =~ /Kconfig/ &&
+                   $line =~ /^\+\s*\bboolean\b/) {
+                       WARN("CONFIG_TYPE_BOOLEAN",
+                            "Use of boolean is deprecated, please use bool instead.\n" . $herecurr);
+               }
+
+               if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
+                   ($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
+                       my $flag = $1;
+                       my $replacement = {
+                               'EXTRA_AFLAGS' =>   'asflags-y',
+                               'EXTRA_CFLAGS' =>   'ccflags-y',
+                               'EXTRA_CPPFLAGS' => 'cppflags-y',
+                               'EXTRA_LDFLAGS' =>  'ldflags-y',
+                       };
+
+                       WARN("DEPRECATED_VARIABLE",
+                            "Use of $flag is deprecated, please use $replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag});
+               }
+
+# check for DT compatible documentation
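+# e.g. an added 'compatible = "vendor,device"' property is grepped for under
+# Documentation/devicetree/bindings/, and the "vendor" prefix is checked
+# against vendor-prefixes.txt; either one missing produces a warning below.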
+               if (defined $root &&
+                       (($realfile =~ /\.dtsi?$/ && $line =~ /^\+\s*compatible\s*=\s*\"/) ||
+                        ($realfile =~ /\.[ch]$/ && $line =~ /^\+.*\.compatible\s*=\s*\"/))) {
+
+                       my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g;
+
+                       my $dt_path = $root . "/Documentation/devicetree/bindings/";
+                       my $vp_file = $dt_path . "vendor-prefixes.txt";
+
+                       foreach my $compat (@compats) {
+                               my $compat2 = $compat;
+                               $compat2 =~ s/\,[a-zA-Z0-9]*\-/\,<\.\*>\-/;
+                               my $compat3 = $compat;
+                               $compat3 =~ s/\,([a-z]*)[0-9]*\-/\,$1<\.\*>\-/;
+                               `grep -Erq "$compat|$compat2|$compat3" $dt_path`;
+                               if ( $? >> 8 ) {
+                                       WARN("UNDOCUMENTED_DT_STRING",
+                                            "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr);
+                               }
+
+                               next if $compat !~ /^([a-zA-Z0-9\-]+)\,/;
+                               my $vendor = $1;
+                               `grep -Eq "^$vendor\\b" $vp_file`;
+                               if ( $? >> 8 ) {
+                                       WARN("UNDOCUMENTED_DT_STRING",
+                                            "DT compatible string vendor \"$vendor\" appears un-documented -- check $vp_file\n" . $herecurr);
+                               }
+                       }
+               }
+
+# check we are in a valid source file; if not, ignore this hunk
+               next if ($realfile !~ /\.(h|c|s|S|pl|sh|dtsi|dts)$/);
+
+# line length limit (with some exclusions)
+#
+# There are a few types of lines that may extend beyond $max_line_length:
+#      logging functions like pr_info that end in a string
+#      lines with a single string
+#      #defines that are a single string
+#
+# There are 3 different line length message types:
+# LONG_LINE_COMMENT    a comment starts before but extends beyond $max_line_length
+# LONG_LINE_STRING     a string starts before but extends beyond $max_line_length
+# LONG_LINE            all other lines longer than $max_line_length
+#
+# if LONG_LINE is ignored, the other 2 types are also ignored
+#
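+# For instance, a line such as
+#   pr_info("a long message string that happens to run past the limit\n");
+# is not reported when the string itself starts before $max_line_length,
+# whereas an ordinary statement of the same length is reported as LONG_LINE.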
+
+               if ($line =~ /^\+/ && $length > $max_line_length) {
+                       my $msg_type = "LONG_LINE";
+
+                       # Check the allowed long line types first
+
+                       # logging functions that end in a string that starts
+                       # before $max_line_length
+                       if ($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(?:KERN_\S+\s*|[^"]*))?($String\s*(?:|,|\)\s*;)\s*)$/ &&
+                           length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
+                               $msg_type = "";
+
+                       # lines with only strings (w/ possible termination)
+                       # #defines with only strings
+                       } elsif ($line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
+                                $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) {
+                               $msg_type = "";
+
+                       # Otherwise set the alternate message types
+
+                       # a comment starts before $max_line_length
+                       } elsif ($line =~ /($;[\s$;]*)$/ &&
+                                length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
+                               $msg_type = "LONG_LINE_COMMENT"
+
+                       # a quoted string starts before $max_line_length
+                       } elsif ($sline =~ /\s*($String(?:\s*(?:\\|,\s*|\)\s*;\s*))?)$/ &&
+                                length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
+                               $msg_type = "LONG_LINE_STRING"
+                       }
+
+                       if ($msg_type ne "" &&
+                           (show_type("LONG_LINE") || show_type($msg_type))) {
+                               WARN($msg_type,
+                                    "line over $max_line_length characters\n" . $herecurr);
+                       }
+               }
+
+# check for adding lines without a newline.
+               if ($line =~ /^\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\ No newline at end of file/) {
+                       WARN("MISSING_EOF_NEWLINE",
+                            "adding a line without newline at end of file\n" . $herecurr);
+               }
+
+# Blackfin: use hi/lo macros
+               if ($realfile =~ m@arch/blackfin/.*\.S$@) {
+                       if ($line =~ /\.[lL][[:space:]]*=.*&[[:space:]]*0x[fF][fF][fF][fF]/) {
+                               my $herevet = "$here\n" . cat_vet($line) . "\n";
+                               ERROR("LO_MACRO",
+                                     "use the LO() macro, not (... & 0xFFFF)\n" . $herevet);
+                       }
+                       if ($line =~ /\.[hH][[:space:]]*=.*>>[[:space:]]*16/) {
+                               my $herevet = "$here\n" . cat_vet($line) . "\n";
+                               ERROR("HI_MACRO",
+                                     "use the HI() macro, not (... >> 16)\n" . $herevet);
+                       }
+               }
+
+# check we are in a valid C or Perl source file; if not, ignore this hunk
+               next if ($realfile !~ /\.(h|c|pl|dtsi|dts)$/);
+
+# at the beginning of a line any tabs must come first, and anything
+# indented more than 8 columns must use tabs.
+               if ($rawline =~ /^\+\s* \t\s*\S/ ||
+                   $rawline =~ /^\+\s*        \s*/) {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       $rpt_cleaners = 1;
+                       if (ERROR("CODE_INDENT",
+                                 "code indent should use tabs where possible\n" . $herevet) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e;
+                       }
+               }
+
+# check for space before tabs.
+               if ($rawline =~ /^\+/ && $rawline =~ / \t/) {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       if (WARN("SPACE_BEFORE_TAB",
+                               "please, no space before tabs\n" . $herevet) &&
+                           $fix) {
+                               while ($fixed[$fixlinenr] =~
+                                          s/(^\+.*) {8,8}\t/$1\t\t/) {}
+                               while ($fixed[$fixlinenr] =~
+                                          s/(^\+.*) +\t/$1\t/) {}
+                       }
+               }
+
+# check for && or || at the start of a line
+               if ($rawline =~ /^\+\s*(&&|\|\|)/) {
+                       CHK("LOGICAL_CONTINUATIONS",
+                           "Logical continuations should be on the previous line\n" . $hereprev);
+               }
+
+# check multi-line statement indentation matches previous line
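+# e.g. when the previous added line opens a call or conditional and ends in
+# "&&", "||" or ",", the next line is expected to start in the column just
+# past that line's last unmatched '(' (as computed by pos_last_openparen()),
+# indented with tabs then spaces or with spaces alone.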
+               if ($^V && $^V ge 5.10.0 &&
+                   $prevline =~ /^\+([ \t]*)((?:$c90_Keywords(?:\s+if)\s*)|(?:$Declare\s*)?(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*|$Ident\s*=\s*$Ident\s*)\(.*(\&\&|\|\||,)\s*$/) {
+                       $prevline =~ /^\+(\t*)(.*)$/;
+                       my $oldindent = $1;
+                       my $rest = $2;
+
+                       my $pos = pos_last_openparen($rest);
+                       if ($pos >= 0) {
+                               $line =~ /^(\+| )([ \t]*)/;
+                               my $newindent = $2;
+
+                               my $goodtabindent = $oldindent .
+                                       "\t" x ($pos / 8) .
+                                       " "  x ($pos % 8);
+                               my $goodspaceindent = $oldindent . " "  x $pos;
+
+                               if ($newindent ne $goodtabindent &&
+                                   $newindent ne $goodspaceindent) {
+
+                                       if (CHK("PARENTHESIS_ALIGNMENT",
+                                               "Alignment should match open parenthesis\n" . $hereprev) &&
+                                           $fix && $line =~ /^\+/) {
+                                               $fixed[$fixlinenr] =~
+                                                   s/^\+[ \t]*/\+$goodtabindent/;
+                                       }
+                               }
+                       }
+               }
+
+# check for space after cast like "(int) foo" or "(struct foo) bar"
+# avoid checking a few false positives:
+#   "sizeof(<type>)" or "__alignof__(<type>)"
+#   function pointer declarations like "(*foo)(int) = bar;"
+#   structure definitions like "(struct foo) { 0 };"
+#   multiline macros that define functions
+#   known attributes or the __attribute__ keyword
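+# e.g. "(u32) value" is reported (and with --fix collapsed to "(u32)value"),
+# while "sizeof(u32)" and the constructs listed above are left alone.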
+               if ($line =~ /^\+(.*)\(\s*$Type\s*\)([ \t]++)((?![={]|\\$|$Attribute|__attribute__))/ &&
+                   (!defined($1) || $1 !~ /\b(?:sizeof|__alignof__)\s*$/)) {
+                       if (CHK("SPACING",
+                               "No space is necessary after a cast\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/(\(\s*$Type\s*\))[ \t]+/$1/;
+                       }
+               }
+
+# Block comment styles
+# Networking with an initial /*
+               if ($realfile =~ m@^(drivers/net/|net/)@ &&
+                   $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ &&
+                   $rawline =~ /^\+[ \t]*\*/ &&
+                   $realline > 2) {
+                       WARN("NETWORKING_BLOCK_COMMENT_STYLE",
+                            "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev);
+               }
+
+# Block comments use * on subsequent lines
+               if ($prevline =~ /$;[ \t]*$/ &&                 #ends in comment
+                   $prevrawline =~ /^\+.*?\/\*/ &&             #starting /*
+                   $prevrawline !~ /\*\/[ \t]*$/ &&            #no trailing */
+                   $rawline =~ /^\+/ &&                        #line is new
+                   $rawline !~ /^\+[ \t]*\*/) {                #no leading *
+                       WARN("BLOCK_COMMENT_STYLE",
+                            "Block comments use * on subsequent lines\n" . $hereprev);
+               }
+
+# Block comments use */ on trailing lines
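+# e.g. ending a multi-line comment with "... final words */" is reported; the
+# closing "*/" should be on a line of its own (single-line "/* ... */"
+# comments are not affected).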
+               if ($rawline !~ m@^\+[ \t]*\*/[ \t]*$@ &&       #trailing */
+                   $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ &&      #inline /*...*/
+                   $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ &&       #trailing **/
+                   $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) {    #non blank */
+                       WARN("BLOCK_COMMENT_STYLE",
+                            "Block comments use a trailing */ on a separate line\n" . $herecurr);
+               }
+
+# check for missing blank lines after struct/union declarations
+# with exceptions for various attributes and macros
+               if ($prevline =~ /^[\+ ]};?\s*$/ &&
+                   $line =~ /^\+/ &&
+                   !($line =~ /^\+\s*$/ ||
+                     $line =~ /^\+\s*EXPORT_SYMBOL/ ||
+                     $line =~ /^\+\s*MODULE_/i ||
+                     $line =~ /^\+\s*\#\s*(?:end|elif|else)/ ||
+                     $line =~ /^\+[a-z_]*init/ ||
+                     $line =~ /^\+\s*(?:static\s+)?[A-Z_]*ATTR/ ||
+                     $line =~ /^\+\s*DECLARE/ ||
+                     $line =~ /^\+\s*__setup/)) {
+                       if (CHK("LINE_SPACING",
+                               "Please use a blank line after function/struct/union/enum declarations\n" . $hereprev) &&
+                           $fix) {
+                               fix_insert_line($fixlinenr, "\+");
+                       }
+               }
+
+# check for multiple consecutive blank lines
+               if ($prevline =~ /^[\+ ]\s*$/ &&
+                   $line =~ /^\+\s*$/ &&
+                   $last_blank_line != ($linenr - 1)) {
+                       if (CHK("LINE_SPACING",
+                               "Please don't use multiple blank lines\n" . $hereprev) &&
+                           $fix) {
+                               fix_delete_line($fixlinenr, $rawline);
+                       }
+
+                       $last_blank_line = $linenr;
+               }
+
+# check for missing blank lines after declarations
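+# e.g. a declaration like "int ret;" immediately followed by a statement such
+# as "ret = foo();" at the same indent is reported; a blank line between the
+# declarations and the first statement avoids the warning.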
+               if ($sline =~ /^\+\s+\S/ &&                     #Not at char 1
+                       # actual declarations
+                   ($prevline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
+                       # function pointer declarations
+                    $prevline =~ /^\+\s+$Declare\s*\(\s*\*\s*$Ident\s*\)\s*[=,;:\[\(]/ ||
+                       # foo bar; where foo is some local typedef or #define
+                    $prevline =~ /^\+\s+$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
+                       # known declaration macros
+                    $prevline =~ /^\+\s+$declaration_macros/) &&
+                       # for "else if" which can look like "$Ident $Ident"
+                   !($prevline =~ /^\+\s+$c90_Keywords\b/ ||
+                       # other possible extensions of declaration lines
+                     $prevline =~ /(?:$Compare|$Assignment|$Operators)\s*$/ ||
+                       # not starting a section or a macro "\" extended line
+                     $prevline =~ /(?:\{\s*|\\)$/) &&
+                       # looks like a declaration
+                   !($sline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
+                       # function pointer declarations
+                     $sline =~ /^\+\s+$Declare\s*\(\s*\*\s*$Ident\s*\)\s*[=,;:\[\(]/ ||
+                       # foo bar; where foo is some local typedef or #define
+                     $sline =~ /^\+\s+$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
+                       # known declaration macros
+                     $sline =~ /^\+\s+$declaration_macros/ ||
+                       # start of struct or union or enum
+                     $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+                       # start or end of block or continuation of declaration
+                     $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
+                       # bitfield continuation
+                     $sline =~ /^\+\s+$Ident\s*:\s*\d+\s*[,;]/ ||
+                       # other possible extensions of declaration lines
+                     $sline =~ /^\+\s+\(?\s*(?:$Compare|$Assignment|$Operators)/) &&
+                       # indentation of previous and current line are the same
+                   (($prevline =~ /\+(\s+)\S/) && $sline =~ /^\+$1\S/)) {
+                       if (WARN("LINE_SPACING",
+                                "Missing a blank line after declarations\n" . $hereprev) &&
+                           $fix) {
+                               fix_insert_line($fixlinenr, "\+");
+                       }
+               }
+
+# check for spaces at the beginning of a line.
+# Exceptions:
+#  1) within comments
+#  2) indented preprocessor commands
+#  3) hanging labels
+               if ($rawline =~ /^\+ / && $line !~ /^\+ *(?:$;|#|$Ident:)/)  {
+                       my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+                       if (WARN("LEADING_SPACE",
+                                "please, no spaces at the start of a line\n" . $herevet) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e;
+                       }
+               }
+
+# check we are in a valid C source file; if not, ignore this hunk
+               next if ($realfile !~ /\.(h|c)$/);
+
+# check indentation of any line with a bare else
+# (but not if it is a multiple line "if (foo) return bar; else return baz;")
+# if the previous line is a break or return and is indented 1 tab more...
+               if ($sline =~ /^\+([\t]+)(?:}[ \t]*)?else(?:[ \t]*{)?\s*$/) {
+                       my $tabs = length($1) + 1;
+                       if ($prevline =~ /^\+\t{$tabs,$tabs}break\b/ ||
+                           ($prevline =~ /^\+\t{$tabs,$tabs}return\b/ &&
+                            defined $lines[$linenr] &&
+                            $lines[$linenr] !~ /^[ \+]\t{$tabs,$tabs}return/)) {
+                               WARN("UNNECESSARY_ELSE",
+                                    "else is not generally useful after a break or return\n" . $hereprev);
+                       }
+               }
+
+# check indentation of a line with a break;
+# if the previous line is a goto or return and is indented the same number of tabs
+               if ($sline =~ /^\+([\t]+)break\s*;\s*$/) {
+                       my $tabs = $1;
+                       if ($prevline =~ /^\+$tabs(?:goto|return)\b/) {
+                               WARN("UNNECESSARY_BREAK",
+                                    "break is not useful after a goto or return\n" . $hereprev);
+                       }
+               }
+
+# discourage the addition of CONFIG_EXPERIMENTAL in #if(def).
+               if ($line =~ /^\+\s*\#\s*if.*\bCONFIG_EXPERIMENTAL\b/) {
+                       WARN("CONFIG_EXPERIMENTAL",
+                            "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
+               }
+
+# check for RCS/CVS revision markers
+               if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
+                       WARN("CVS_KEYWORD",
+                            "CVS style keyword markers, these will _not_ be updated\n". $herecurr);
+               }
+
+# Blackfin: don't use __builtin_bfin_[cs]sync
+               if ($line =~ /__builtin_bfin_csync/) {
+                       my $herevet = "$here\n" . cat_vet($line) . "\n";
+                       ERROR("CSYNC",
+                             "use the CSYNC() macro in asm/blackfin.h\n" . $herevet);
+               }
+               if ($line =~ /__builtin_bfin_ssync/) {
+                       my $herevet = "$here\n" . cat_vet($line) . "\n";
+                       ERROR("SSYNC",
+                             "use the SSYNC() macro in asm/blackfin.h\n" . $herevet);
+               }
+
+# check for old HOTPLUG __dev<foo> section markings
+               if ($line =~ /\b(__dev(init|exit)(data|const|))\b/) {
+                       WARN("HOTPLUG_SECTION",
+                            "Using $1 is unnecessary\n" . $herecurr);
+               }
+
+# Check for potential 'bare' types
+               my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
+                   $realline_next);
+#print "LINE<$line>\n";
+               if ($linenr >= $suppress_statement &&
+                   $realcnt && $sline =~ /.\s*\S/) {
+                       ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+                               ctx_statement_block($linenr, $realcnt, 0);
+                       $stat =~ s/\n./\n /g;
+                       $cond =~ s/\n./\n /g;
+
+#print "linenr<$linenr> <$stat>\n";
+                       # If this statement has no statement boundaries within
+                       # it there is no point in retrying a statement scan
+                       # until we hit end of it.
+                       my $frag = $stat; $frag =~ s/;+\s*$//;
+                       if ($frag !~ /(?:{|;)/) {
+#print "skip<$line_nr_next>\n";
+                               $suppress_statement = $line_nr_next;
+                       }
+
+                       # Find the real next line.
+                       $realline_next = $line_nr_next;
+                       if (defined $realline_next &&
+                           (!defined $lines[$realline_next - 1] ||
+                            substr($lines[$realline_next - 1], $off_next) =~ /^\s*$/)) {
+                               $realline_next++;
+                       }
+
+                       my $s = $stat;
+                       $s =~ s/{.*$//s;
+
+                       # Ignore goto labels.
+                       if ($s =~ /$Ident:\*$/s) {
+
+                       # Ignore functions being called
+                       } elsif ($s =~ /^.\s*$Ident\s*\(/s) {
+
+                       } elsif ($s =~ /^.\s*else\b/s) {
+
+                       # declarations always start with types
+                       } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
+                               my $type = $1;
+                               $type =~ s/\s+/ /g;
+                               possible($type, "A:" . $s);
+
+                       # definitions in global scope can only start with types
+                       } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b\s*(?!:)/s) {
+                               possible($1, "B:" . $s);
+                       }
+
+                       # any (foo ... *) is a pointer cast, and foo is a type
+                       while ($s =~ /\(($Ident)(?:\s+$Sparse)*[\s\*]+\s*\)/sg) {
+                               possible($1, "C:" . $s);
+                       }
+
+                       # Check for any sort of function declaration.
+                       # int foo(something bar, other baz);
+                       # void (*store_gdt)(x86_descr_ptr *);
+                       if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
+                               my ($name_len) = length($1);
+
+                               my $ctx = $s;
+                               substr($ctx, 0, $name_len + 1, '');
+                               $ctx =~ s/\)[^\)]*$//;
+
+                               for my $arg (split(/\s*,\s*/, $ctx)) {
+                                       if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/s || $arg =~ /^($Ident)$/s) {
+
+                                               possible($1, "D:" . $s);
+                                       }
+                               }
+                       }
+
+               }
+
+#
+# Checks which may be anchored in the context.
+#
+
+# Check for switch () and associated case and default
+# statements should be at the same indent.
+               if ($line=~/\bswitch\s*\(.*\)/) {
+                       my $err = '';
+                       my $sep = '';
+                       my @ctx = ctx_block_outer($linenr, $realcnt);
+                       shift(@ctx);
+                       for my $ctx (@ctx) {
+                               my ($clen, $cindent) = line_stats($ctx);
+                               if ($ctx =~ /^\+\s*(case\s+|default:)/ &&
+                                                       $indent != $cindent) {
+                                       $err .= "$sep$ctx\n";
+                                       $sep = '';
+                               } else {
+                                       $sep = "[...]\n";
+                               }
+                       }
+                       if ($err ne '') {
+                               ERROR("SWITCH_CASE_INDENT_LEVEL",
+                                     "switch and case should be at the same indent\n$hereline$err");
+                       }
+               }
+
+# if/while/etc braces do not go on the next line, unless defining a do-while
+# loop, or unless that brace on the next line is for something else
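+# e.g. an added "if (foo)" with its opening "{" on the following line is
+# reported as OPEN_BRACE; the brace belongs on the conditional line itself.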
+               if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
+                       my $pre_ctx = "$1$2";
+
+                       my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
+
+                       if ($line =~ /^\+\t{6,}/) {
+                               WARN("DEEP_INDENTATION",
+                                    "Too many leading tabs - consider code refactoring\n" . $herecurr);
+                       }
+
+                       my $ctx_cnt = $realcnt - $#ctx - 1;
+                       my $ctx = join("\n", @ctx);
+
+                       my $ctx_ln = $linenr;
+                       my $ctx_skip = $realcnt;
+
+                       while ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&
+                                       defined $lines[$ctx_ln - 1] &&
+                                       $lines[$ctx_ln - 1] =~ /^-/)) {
+                               ##print "SKIP<$ctx_skip> CNT<$ctx_cnt>\n";
+                               $ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);
+                               $ctx_ln++;
+                       }
+
+                       #print "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n";
+                       #print "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n";
+
+                       if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln - 1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
+                               ERROR("OPEN_BRACE",
+                                     "that open brace { should be on the previous line\n" .
+                                       "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
+                       }
+                       if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
+                           $ctx =~ /\)\s*\;\s*$/ &&
+                           defined $lines[$ctx_ln - 1])
+                       {
+                               my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
+                               if ($nindent > $indent) {
+                                       WARN("TRAILING_SEMICOLON",
+                                            "trailing semicolon indicates no statements, indent implies otherwise\n" .
+                                               "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
+                               }
+                       }
+               }
+
+# Check relative indent for conditionals and blocks.
+               if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+                       ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+                               ctx_statement_block($linenr, $realcnt, 0)
+                                       if (!defined $stat);
+                       my ($s, $c) = ($stat, $cond);
+
+                       substr($s, 0, length($c), '');
+
+                       # remove inline comments
+                       $s =~ s/$;/ /g;
+                       $c =~ s/$;/ /g;
+
+                       # Find out how long the conditional actually is.
+                       my @newlines = ($c =~ /\n/gs);
+                       my $cond_lines = 1 + $#newlines;
+
+                       # Make sure we remove the line prefixes as we have
+                       # none on the first line, and are going to re-add them
+                       # where necessary.
+                       $s =~ s/\n./\n/gs;
+                       while ($s =~ /\n\s+\\\n/) {
+                               $cond_lines += $s =~ s/\n\s+\\\n/\n/g;
+                       }
+
+                       # We want to check the first line inside the block
+                       # starting at the end of the conditional, so remove:
+                       #  1) any blank line termination
+                       #  2) any opening brace { on end of the line
+                       #  3) any do (...) {
+                       my $continuation = 0;
+                       my $check = 0;
+                       $s =~ s/^.*\bdo\b//;
+                       $s =~ s/^\s*{//;
+                       if ($s =~ s/^\s*\\//) {
+                               $continuation = 1;
+                       }
+                       if ($s =~ s/^\s*?\n//) {
+                               $check = 1;
+                               $cond_lines++;
+                       }
+
+                       # Also ignore a loop construct at the end of a
+                       # preprocessor statement.
+                       if (($prevline =~ /^.\s*#\s*define\s/ ||
+                           $prevline =~ /\\\s*$/) && $continuation == 0) {
+                               $check = 0;
+                       }
+
+                       my $cond_ptr = -1;
+                       $continuation = 0;
+                       while ($cond_ptr != $cond_lines) {
+                               $cond_ptr = $cond_lines;
+
+                               # If we see an #else/#elif then the code
+                               # is not linear.
+                               if ($s =~ /^\s*\#\s*(?:else|elif)/) {
+                                       $check = 0;
+                               }
+
+                               # Ignore:
+                               #  1) blank lines, they should be at 0,
+                               #  2) preprocessor lines, and
+                               #  3) labels.
+                               if ($continuation ||
+                                   $s =~ /^\s*?\n/ ||
+                                   $s =~ /^\s*#\s*?/ ||
+                                   $s =~ /^\s*$Ident\s*:/) {
+                                       $continuation = ($s =~ /^.*?\\\n/) ? 1 : 0;
+                                       if ($s =~ s/^.*?\n//) {
+                                               $cond_lines++;
+                                       }
+                               }
+                       }
+
+                       my (undef, $sindent) = line_stats("+" . $s);
+                       my $stat_real = raw_line($linenr, $cond_lines);
+
+                       # Check if either of these lines is modified, else
+                       # this is not this patch's fault.
+                       if (!defined($stat_real) ||
+                           $stat !~ /^\+/ && $stat_real !~ /^\+/) {
+                               $check = 0;
+                       }
+                       if (defined($stat_real) && $cond_lines > 1) {
+                               $stat_real = "[...]\n$stat_real";
+                       }
+
+                       #print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\n";
+
+                       if ($check && $s ne '' &&
+                           (($sindent % 8) != 0 ||
+                            ($sindent < $indent) ||
+                            ($sindent > $indent + 8))) {
+                               WARN("SUSPECT_CODE_INDENT",
+                                    "suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
+                       }
+               }
+
+               # Track the 'values' across context and added lines.
+               my $opline = $line; $opline =~ s/^./ /;
+               my ($curr_values, $curr_vars) =
+                               annotate_values($opline . "\n", $prev_values);
+               $curr_values = $prev_values . $curr_values;
+               if ($dbg_values) {
+                       my $outline = $opline; $outline =~ s/\t/ /g;
+                       print "$linenr > .$outline\n";
+                       print "$linenr > $curr_values\n";
+                       print "$linenr >  $curr_vars\n";
+               }
+               $prev_values = substr($curr_values, -1);
+
+#ignore lines not being added
+               next if ($line =~ /^[^\+]/);
+
+# check for declarations of signed or unsigned without int
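+# Illustrative example (hypothetical name, not from the original script):
+# "unsigned counter;" is reported; prefer "unsigned int counter;".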
+               while ($line =~ m{($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) {
+                       my $type = $1;
+                       my $var = $2;
+                       $var = "" if (!defined $var);
+                       if ($type =~ /^(?:(?:$Storage|$Inline|$Attribute)\s+)*((?:un)?signed)((?:\s*\*)*)\s*$/) {
+                               my $sign = $1;
+                               my $pointer = $2;
+
+                               $pointer = "" if (!defined $pointer);
+
+                               if (WARN("UNSPECIFIED_INT",
+                                        "Prefer '" . trim($sign) . " int" . rtrim($pointer) . "' to bare use of '$sign" . rtrim($pointer) . "'\n" . $herecurr) &&
+                                   $fix) {
+                                       my $decl = trim($sign) . " int ";
+                                       my $comp_pointer = $pointer;
+                                       $comp_pointer =~ s/\s//g;
+                                       $decl .= $comp_pointer;
+                                       $decl = rtrim($decl) if ($var eq "");
+                                       $fixed[$fixlinenr] =~ s@\b$sign\s*\Q$pointer\E\s*$var\b@$decl$var@;
+                               }
+                       }
+               }
+
+# TEST: allow direct testing of the type matcher.
+               if ($dbg_type) {
+                       if ($line =~ /^.\s*$Declare\s*$/) {
+                               ERROR("TEST_TYPE",
+                                     "TEST: is type\n" . $herecurr);
+                       } elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {
+                               ERROR("TEST_NOT_TYPE",
+                                     "TEST: is not type ($1 is)\n". $herecurr);
+                       }
+                       next;
+               }
+# TEST: allow direct testing of the attribute matcher.
+               if ($dbg_attr) {
+                       if ($line =~ /^.\s*$Modifier\s*$/) {
+                               ERROR("TEST_ATTR",
+                                     "TEST: is attr\n" . $herecurr);
+                       } elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {
+                               ERROR("TEST_NOT_ATTR",
+                                     "TEST: is not attr ($1 is)\n". $herecurr);
+                       }
+                       next;
+               }
+
+# check for initialisation of aggregates with the open brace on the next line
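+# Illustrative example (hypothetical struct name, not from the original
+# script):
+#         static struct foo bar =
+#         {
+# is reported; the open brace belongs on the line with the '=':
+#         static struct foo bar = {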
+               if ($line =~ /^.\s*{/ &&
+                   $prevline =~ /(?:^|[^=])=\s*$/) {
+                       if (ERROR("OPEN_BRACE",
+                                 "that open brace { should be on the previous line\n" . $hereprev) &&
+                           $fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
+                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                               fix_delete_line($fixlinenr, $rawline);
+                               my $fixedline = $prevrawline;
+                               $fixedline =~ s/\s*=\s*$/ = {/;
+                               fix_insert_line($fixlinenr, $fixedline);
+                               $fixedline = $line;
+                               $fixedline =~ s/^(.\s*){\s*/$1/;
+                               fix_insert_line($fixlinenr, $fixedline);
+                       }
+               }
+
+#
+# Checks which are anchored on the added line.
+#
+
+# check for malformed paths in #include statements (uses RAW line)
+               if ($rawline =~ m{^.\s*\#\s*include\s+[<"](.*)[">]}) {
+                       my $path = $1;
+                       if ($path =~ m{//}) {
+                               ERROR("MALFORMED_INCLUDE",
+                                     "malformed #include filename\n" . $herecurr);
+                       }
+                       if ($path =~ "^uapi/" && $realfile =~ m@\binclude/uapi/@) {
+                               ERROR("UAPI_INCLUDE",
+                                     "No #include in ...include/uapi/... should use a uapi/ path prefix\n" . $herecurr);
+                       }
+               }
+
+# no C99 // comments
+               if ($line =~ m{//}) {
+                       if (ERROR("C99_COMMENTS",
+                                 "do not use C99 // comments\n" . $herecurr) &&
+                           $fix) {
+                               my $line = $fixed[$fixlinenr];
+                               if ($line =~ /\/\/(.*)$/) {
+                                       my $comment = trim($1);
+                                       $fixed[$fixlinenr] =~ s@\/\/(.*)$@/\* $comment \*/@;
+                               }
+                       }
+               }
+               # Remove C99 comments.
+               $line =~ s@//.*@@;
+               $opline =~ s@//.*@@;
+
+# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider
+# the whole statement.
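+# Illustrative example (hypothetical function name, not from the original
+# script): the accepted layout is
+#         void foo(void)
+#         {
+#         }
+#         EXPORT_SYMBOL(foo);
+# an EXPORT_SYMBOL() separated from its definition, e.g. by a blank line,
+# is warned about below.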
+#print "APW <$lines[$realline_next - 1]>\n";
+               if (defined $realline_next &&
+                   exists $lines[$realline_next - 1] &&
+                   !defined $suppress_export{$realline_next} &&
+                   ($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
+                    $lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+                       # Handle definitions which produce identifiers with
+                       # a prefix:
+                       #   XXX(foo);
+                       #   EXPORT_SYMBOL(something_foo);
+                       my $name = $1;
+                       if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
+                           $name =~ /^${Ident}_$2/) {
+#print "FOO C name<$name>\n";
+                               $suppress_export{$realline_next} = 1;
+
+                       } elsif ($stat !~ /(?:
+                               \n.}\s*$|
+                               ^.DEFINE_$Ident\(\Q$name\E\)|
+                               ^.DECLARE_$Ident\(\Q$name\E\)|
+                               ^.LIST_HEAD\(\Q$name\E\)|
+                               ^.(?:$Storage\s+)?$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(|
+                               \b\Q$name\E(?:\s+$Attribute)*\s*(?:;|=|\[|\()
+                           )/x) {
+#print "FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\n";
+                               $suppress_export{$realline_next} = 2;
+                       } else {
+                               $suppress_export{$realline_next} = 1;
+                       }
+               }
+               if (!defined $suppress_export{$linenr} &&
+                   $prevline =~ /^.\s*$/ &&
+                   ($line =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
+                    $line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+#print "FOO B <$lines[$linenr - 1]>\n";
+                       $suppress_export{$linenr} = 2;
+               }
+               if (defined $suppress_export{$linenr} &&
+                   $suppress_export{$linenr} == 2) {
+                       WARN("EXPORT_SYMBOL",
+                            "EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
+               }
+
+# check for global initialisers.
+               if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*($zero_initializer)\s*;/) {
+                       if (ERROR("GLOBAL_INITIALISERS",
+                                 "do not initialise globals to $1\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*$zero_initializer\s*;/$1;/;
+                       }
+               }
+# check for static initialisers.
+               if ($line =~ /^\+.*\bstatic\s.*=\s*($zero_initializer)\s*;/) {
+                       if (ERROR("INITIALISED_STATIC",
+                                 "do not initialise statics to $1\n" .
+                                     $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/(\bstatic\s.*?)\s*=\s*$zero_initializer\s*;/$1;/;
+                       }
+               }
+
+# check for misordered declarations of char/short/int/long with signed/unsigned
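+# Illustrative example (not from the original script): "int unsigned x;"
+# is reported and should be written "unsigned int x;".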
+               while ($sline =~ m{(\b$TypeMisordered\b)}g) {
+                       my $tmp = trim($1);
+                       WARN("MISORDERED_TYPE",
+                            "type '$tmp' should be specified in [[un]signed] [short|int|long|long long] order\n" . $herecurr);
+               }
+
+# check for static const char * arrays.
+               if ($line =~ /\bstatic\s+const\s+char\s*\*\s*(\w+)\s*\[\s*\]\s*=\s*/) {
+                       WARN("STATIC_CONST_CHAR_ARRAY",
+                            "static const char * array should probably be static const char * const\n" .
+                               $herecurr);
+               }
+
+# check for static char foo[] = "bar" declarations.
+               if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) {
+                       WARN("STATIC_CONST_CHAR_ARRAY",
+                            "static char array declaration should probably be static const char\n" .
+                               $herecurr);
+               }
+
+# check for const <foo> const where <foo> is not a pointer or array type
+               if ($sline =~ /\bconst\s+($BasicType)\s+const\b/) {
+                       my $found = $1;
+                       if ($sline =~ /\bconst\s+\Q$found\E\s+const\b\s*\*/) {
+                               WARN("CONST_CONST",
+                                    "'const $found const *' should probably be 'const $found * const'\n" . $herecurr);
+                       } elsif ($sline !~ /\bconst\s+\Q$found\E\s+const\s+\w+\s*\[/) {
+                               WARN("CONST_CONST",
+                                    "'const $found const' should probably be 'const $found'\n" . $herecurr);
+                       }
+               }
+
+# check for non-global char *foo[] = {"bar", ...} declarations.
+               if ($line =~ /^.\s+(?:static\s+|const\s+)?char\s+\*\s*\w+\s*\[\s*\]\s*=\s*\{/) {
+                       WARN("STATIC_CONST_CHAR_ARRAY",
+                            "char * array declaration might be better as static const\n" .
+                               $herecurr);
+               }
+
+# check for sizeof(foo)/sizeof(foo[0]) that could be ARRAY_SIZE(foo)
+               if ($line =~ m@\bsizeof\s*\(\s*($Lval)\s*\)@) {
+                       my $array = $1;
+                       if ($line =~ m@\b(sizeof\s*\(\s*\Q$array\E\s*\)\s*/\s*sizeof\s*\(\s*\Q$array\E\s*\[\s*0\s*\]\s*\))@) {
+                               my $array_div = $1;
+                               if (WARN("ARRAY_SIZE",
+                                        "Prefer ARRAY_SIZE($array)\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\Q$array_div\E/ARRAY_SIZE($array)/;
+                               }
+                       }
+               }
+
+# check for function declarations without arguments like "int foo()"
+               if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
+                       if (ERROR("FUNCTION_WITHOUT_ARGS",
+                                 "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
+                       }
+               }
+
+# check for uses of DEFINE_PCI_DEVICE_TABLE
+               if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
+                       if (WARN("DEFINE_PCI_DEVICE_TABLE",
+                                "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
+                       }
+               }
+
+# check for new typedefs, only function parameters and sparse annotations
+# make sense.
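+# Illustrative examples (hypothetical names, not from the original
+# script): a function-pointer typedef such as
+#         typedef void (*callback_t)(int);
+# is accepted, while a plain alias such as
+#         typedef unsigned long my_handle_t;
+# is reported below.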
+               if ($line =~ /\btypedef\s/ &&
+                   $line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
+                   $line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
+                   $line !~ /\b$typeTypedefs\b/ &&
+                   $line !~ /\b__bitwise(?:__|)\b/) {
+                       WARN("NEW_TYPEDEFS",
+                            "do not add new typedefs\n" . $herecurr);
+               }
+
+# * goes on variable not on type
+               # (char*[ const])
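+               # Illustrative example (hypothetical names, not from the
+               # original script): "char* foo;" and "(char*)bar" are
+               # reported; the preferred forms are "char *foo;" and
+               # "(char *)bar".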
+               while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) {
+                       #print "AA<$1>\n";
+                       my ($ident, $from, $to) = ($1, $2, $2);
+
+                       # Should start with a space.
+                       $to =~ s/^(\S)/ $1/;
+                       # Should not end with a space.
+                       $to =~ s/\s+$//;
+                       # '*'s should not have spaces between.
+                       while ($to =~ s/\*\s+\*/\*\*/) {
+                       }
+
+##                     print "1: from<$from> to<$to> ident<$ident>\n";
+                       if ($from ne $to) {
+                               if (ERROR("POINTER_LOCATION",
+                                         "\"(foo$from)\" should be \"(foo$to)\"\n" .  $herecurr) &&
+                                   $fix) {
+                                       my $sub_from = $ident;
+                                       my $sub_to = $ident;
+                                       $sub_to =~ s/\Q$from\E/$to/;
+                                       $fixed[$fixlinenr] =~
+                                           s@\Q$sub_from\E@$sub_to@;
+                               }
+                       }
+               }
+               while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) {
+                       #print "BB<$1>\n";
+                       my ($match, $from, $to, $ident) = ($1, $2, $2, $3);
+
+                       # Should start with a space.
+                       $to =~ s/^(\S)/ $1/;
+                       # Should not end with a space.
+                       $to =~ s/\s+$//;
+                       # '*'s should not have spaces between.
+                       while ($to =~ s/\*\s+\*/\*\*/) {
+                       }
+                       # Modifiers should have spaces.
+                       $to =~ s/(\b$Modifier$)/$1 /;
+
+##                     print "2: from<$from> to<$to> ident<$ident>\n";
+                       if ($from ne $to && $ident !~ /^$Modifier$/) {
+                               if (ERROR("POINTER_LOCATION",
+                                         "\"foo${from}bar\" should be \"foo${to}bar\"\n" .  $herecurr) &&
+                                   $fix) {
+
+                                       my $sub_from = $match;
+                                       my $sub_to = $match;
+                                       $sub_to =~ s/\Q$from\E/$to/;
+                                       $fixed[$fixlinenr] =~
+                                           s@\Q$sub_from\E@$sub_to@;
+                               }
+                       }
+               }
+
+# avoid BUG() or BUG_ON()
+               if ($line =~ /\b(?:BUG|BUG_ON)\b/) {
+                       my $msg_type = \&WARN;
+                       $msg_type = \&CHK if ($file);
+                       &{$msg_type}("AVOID_BUG",
+                                    "Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()\n" . $herecurr);
+               }
+
+# avoid LINUX_VERSION_CODE
+               if ($line =~ /\bLINUX_VERSION_CODE\b/) {
+                       WARN("LINUX_VERSION_CODE",
+                            "LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
+               }
+
+# check for uses of printk_ratelimit
+               if ($line =~ /\bprintk_ratelimit\s*\(/) {
+                       WARN("PRINTK_RATELIMITED",
+                            "Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
+               }
+
+# printk should use KERN_* levels.  Note that follow-on printk's on the
+# same line do not need a level, so we use the current block context
+# to try to find and validate the current printk.  In summary, the current
+# printk includes all preceding printk's which have no newline on the end.
+# We assume the first bad printk is the one to report.
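+# Illustrative example (hypothetical message text, not from the original
+# script): in
+#         printk(KERN_INFO "first part");
+#         printk(" continued\n");
+# the second printk is accepted because the preceding printk has no
+# trailing "\n"; a lone printk("...\n") with no KERN_ level is reported.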
+               if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
+                       my $ok = 0;
+                       for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
+                               #print "CHECK<$lines[$ln - 1]\n";
+                               # we have a preceding printk; if it ends
+                               # with "\n", ignore it, else it is to blame
+                               if ($lines[$ln - 1] =~ m{\bprintk\(}) {
+                                       if ($rawlines[$ln - 1] !~ m{\\n"}) {
+                                               $ok = 1;
+                                       }
+                                       last;
+                               }
+                       }
+                       if ($ok == 0) {
+                               WARN("PRINTK_WITHOUT_KERN_LEVEL",
+                                    "printk() should include KERN_ facility level\n" . $herecurr);
+                       }
+               }
+
+               if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
+                       my $orig = $1;
+                       my $level = lc($orig);
+                       $level = "warn" if ($level eq "warning");
+                       my $level2 = $level;
+                       $level2 = "dbg" if ($level eq "debug");
+                       WARN("PREFER_PR_LEVEL",
+                            "Prefer [subsystem eg: netdev]_$level2([subsystem]dev, ... then dev_$level2(dev, ... then pr_$level(...  to printk(KERN_$orig ...\n" . $herecurr);
+               }
+
+               if ($line =~ /\bpr_warning\s*\(/) {
+                       if (WARN("PREFER_PR_LEVEL",
+                                "Prefer pr_warn(... to pr_warning(...\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\bpr_warning\b/pr_warn/;
+                       }
+               }
+
+               if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
+                       my $orig = $1;
+                       my $level = lc($orig);
+                       $level = "warn" if ($level eq "warning");
+                       $level = "dbg" if ($level eq "debug");
+                       WARN("PREFER_DEV_LEVEL",
+                            "Prefer dev_$level(... to dev_printk(KERN_$orig, ...\n" . $herecurr);
+               }
+
+# ENOSYS means "bad syscall nr" and nothing else.  This will have a small
+# number of false positives, but assembly files are not checked, so at
+# least the arch entry code will not trigger this warning.
+               if ($line =~ /\bENOSYS\b/) {
+                       WARN("ENOSYS",
+                            "ENOSYS means 'invalid syscall nr' and nothing else\n" . $herecurr);
+               }
+
+# function brace can't be on same line, except for #defines of do while,
+# or if closed on same line
+               if (($line=~/$Type\s*$Ident\(.*\).*\s*{/) and
+                   !($line=~/\#\s*define.*do\s\{/) and !($line=~/}/)) {
+                       if (ERROR("OPEN_BRACE",
+                                 "open brace '{' following function declarations go on the next line\n" . $herecurr) &&
+                           $fix) {
+                               fix_delete_line($fixlinenr, $rawline);
+                               my $fixed_line = $rawline;
+                               $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/;
+                               my $line1 = $1;
+                               my $line2 = $2;
+                               fix_insert_line($fixlinenr, ltrim($line1));
+                               fix_insert_line($fixlinenr, "\+{");
+                               if ($line2 !~ /^\s*$/) {
+                                       fix_insert_line($fixlinenr, "\+\t" . trim($line2));
+                               }
+                       }
+               }
+
+# open braces for enum, union and struct go on the same line.
+               if ($line =~ /^.\s*{/ &&
+                   $prevline =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?\s*$/) {
+                       if (ERROR("OPEN_BRACE",
+                                 "open brace '{' following $1 go on the same line\n" . $hereprev) &&
+                           $fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
+                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                               fix_delete_line($fixlinenr, $rawline);
+                               my $fixedline = rtrim($prevrawline) . " {";
+                               fix_insert_line($fixlinenr, $fixedline);
+                               $fixedline = $rawline;
+                               $fixedline =~ s/^(.\s*){\s*/$1\t/;
+                               if ($fixedline !~ /^\+\s*$/) {
+                                       fix_insert_line($fixlinenr, $fixedline);
+                               }
+                       }
+               }
+
+# missing space after union, struct or enum definition
+               if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident){1,2}[=\{]/) {
+                       if (WARN("SPACING",
+                                "missing space after $1 definition\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/^(.\s*(?:typedef\s+)?(?:enum|union|struct)(?:\s+$Ident){1,2})([=\{])/$1 $2/;
+                       }
+               }
+
+# Function pointer declarations
+# check spacing between type, funcptr, and args
+# canonical declaration is "type (*funcptr)(args...)"
+               if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)($Ident)(\s*)\)(\s*)\(/) {
+                       my $declare = $1;
+                       my $pre_pointer_space = $2;
+                       my $post_pointer_space = $3;
+                       my $funcname = $4;
+                       my $post_funcname_space = $5;
+                       my $pre_args_space = $6;
+
+# the $Declare variable will capture all spaces after the type
+# so check it for a missing trailing space, but pointer return types
+# don't need a space, so don't warn for those.
+                       my $post_declare_space = "";
+                       if ($declare =~ /(\s+)$/) {
+                               $post_declare_space = $1;
+                               $declare = rtrim($declare);
+                       }
+                       if ($declare !~ /\*$/ && $post_declare_space =~ /^$/) {
+                               WARN("SPACING",
+                                    "missing space after return type\n" . $herecurr);
+                               $post_declare_space = " ";
+                       }
+
+# unnecessary space "type  (*funcptr)(args...)"
+# This test is not currently implemented because these declarations are
+# equivalent to
+#      int  foo(int bar, ...)
+# and this form shouldn't/doesn't generate a checkpatch warning.
+#
+#                      elsif ($declare =~ /\s{2,}$/) {
+#                              WARN("SPACING",
+#                                   "Multiple spaces after return type\n" . $herecurr);
+#                      }
+
+# unnecessary space "type ( *funcptr)(args...)"
+                       if (defined $pre_pointer_space &&
+                           $pre_pointer_space =~ /^\s/) {
+                               WARN("SPACING",
+                                    "Unnecessary space after function pointer open parenthesis\n" . $herecurr);
+                       }
+
+# unnecessary space "type (* funcptr)(args...)"
+                       if (defined $post_pointer_space &&
+                           $post_pointer_space =~ /^\s/) {
+                               WARN("SPACING",
+                                    "Unnecessary space before function pointer name\n" . $herecurr);
+                       }
+
+# unnecessary space "type (*funcptr )(args...)"
+                       if (defined $post_funcname_space &&
+                           $post_funcname_space =~ /^\s/) {
+                               WARN("SPACING",
+                                    "Unnecessary space after function pointer name\n" . $herecurr);
+                       }
+
+# unnecessary space "type (*funcptr) (args...)"
+                       if (defined $pre_args_space &&
+                           $pre_args_space =~ /^\s/) {
+                               WARN("SPACING",
+                                    "Unnecessary space before function pointer arguments\n" . $herecurr);
+                       }
+
+                       if (show_type("SPACING") && $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/^(.\s*)$Declare\s*\(\s*\*\s*$Ident\s*\)\s*\(/$1 . $declare . $post_declare_space . '(*' . $funcname . ')('/ex;
+                       }
+               }
+
+# check for spacing round square brackets; allowed:
+#  1. with a type on the left -- int [] a;
+#  2. at the beginning of a line for slice initialisers -- [0...10] = 5,
+#  3. inside a curly brace -- = { [0...10] = 5 }
+               while ($line =~ /(.*?\s)\[/g) {
+                       my ($where, $prefix) = ($-[1], $1);
+                       if ($prefix !~ /$Type\s+$/ &&
+                           ($where != 0 || $prefix !~ /^.\s+$/) &&
+                           $prefix !~ /[{,]\s+$/) {
+                               if (ERROR("BRACKET_SPACE",
+                                         "space prohibited before open square bracket '['\n" . $herecurr) &&
+                                   $fix) {
+                                   $fixed[$fixlinenr] =~
+                                       s/^(\+.*?)\s+\[/$1\[/;
+                               }
+                       }
+               }
+
+# check for spaces between functions and their parentheses.
+               while ($line =~ /($Ident)\s+\(/g) {
+                       my $name = $1;
+                       my $ctx_before = substr($line, 0, $-[1]);
+                       my $ctx = "$ctx_before$name";
+
+                       # Ignore those directives where spaces _are_ permitted.
+                       if ($name =~ /^(?:
+                               if|for|while|switch|return|case|
+                               volatile|__volatile__|
+                               __attribute__|format|__extension__|
+                               asm|__asm__)$/x)
+                       {
+                       # cpp #define statements have non-optional spaces, ie
+                       # if there is a space between the name and the open
+                       # parenthesis it is simply not a parameter group.
+                       } elsif ($ctx_before =~ /^.\s*\#\s*define\s*$/) {
+
+                       # cpp #elif statement condition may start with a (
+                       } elsif ($ctx =~ /^.\s*\#\s*elif\s*$/) {
+
+                       # If this whole thing ends with a type, it's most
+                       # likely a typedef for a function.
+                       } elsif ($ctx =~ /$Type$/) {
+
+                       } else {
+                               if (WARN("SPACING",
+                                        "space prohibited between function name and open parenthesis '('\n" . $herecurr) &&
+                                            $fix) {
+                                       $fixed[$fixlinenr] =~
+                                           s/\b$name\s+\(/$name\(/;
+                               }
+                       }
+               }
+
+# Check operator spacing.
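+# Illustrative note (paraphrasing the logic below, not from the original
+# script): each operator is classified by its surroundings into a context
+# string such as "WxW" or "VxV" (V = value, W = whitespace, E = line
+# start/end, B = bracket, C = comment, O = nothing), and the per-operator
+# rules key off that context; e.g. "a->b" (VxV) is fine while "a -> b"
+# (WxW) draws "spaces prohibited around that '->'".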
+               if (!($line=~/\#\s*include/)) {
+                       my $fixed_line = "";
+                       my $line_fixed = 0;
+
+                       my $ops = qr{
+                               <<=|>>=|<=|>=|==|!=|
+                               \+=|-=|\*=|\/=|%=|\^=|\|=|&=|
+                               =>|->|<<|>>|<|>|=|!|~|
+                               &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
+                               \?:|\?|:
+                       }x;
+                       my @elements = split(/($ops|;)/, $opline);
+
+##                     print("element count: <" . $#elements . ">\n");
+##                     foreach my $el (@elements) {
+##                             print("el: <$el>\n");
+##                     }
+
+                       my @fix_elements = ();
+                       my $off = 0;
+
+                       foreach my $el (@elements) {
+                               push(@fix_elements, substr($rawline, $off, length($el)));
+                               $off += length($el);
+                       }
+
+                       $off = 0;
+
+                       my $blank = copy_spacing($opline);
+                       my $last_after = -1;
+
+                       for (my $n = 0; $n < $#elements; $n += 2) {
+
+                               my $good = $fix_elements[$n] . $fix_elements[$n + 1];
+
+##                             print("n: <$n> good: <$good>\n");
+
+                               $off += length($elements[$n]);
+
+                               # Pick up the preceding and succeeding characters.
+                               my $ca = substr($opline, 0, $off);
+                               my $cc = '';
+                               if (length($opline) >= ($off + length($elements[$n + 1]))) {
+                                       $cc = substr($opline, $off + length($elements[$n + 1]));
+                               }
+                               my $cb = "$ca$;$cc";
+
+                               my $a = '';
+                               $a = 'V' if ($elements[$n] ne '');
+                               $a = 'W' if ($elements[$n] =~ /\s$/);
+                               $a = 'C' if ($elements[$n] =~ /$;$/);
+                               $a = 'B' if ($elements[$n] =~ /(\[|\()$/);
+                               $a = 'O' if ($elements[$n] eq '');
+                               $a = 'E' if ($ca =~ /^\s*$/);
+
+                               my $op = $elements[$n + 1];
+
+                               my $c = '';
+                               if (defined $elements[$n + 2]) {
+                                       $c = 'V' if ($elements[$n + 2] ne '');
+                                       $c = 'W' if ($elements[$n + 2] =~ /^\s/);
+                                       $c = 'C' if ($elements[$n + 2] =~ /^$;/);
+                                       $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
+                                       $c = 'O' if ($elements[$n + 2] eq '');
+                                       $c = 'E' if ($elements[$n + 2] =~ /^\s*\\$/);
+                               } else {
+                                       $c = 'E';
+                               }
+
+                               my $ctx = "${a}x${c}";
+
+                               my $at = "(ctx:$ctx)";
+
+                               my $ptr = substr($blank, 0, $off) . "^";
+                               my $hereptr = "$hereline$ptr\n";
+
+                               # Pull out the value of this operator.
+                               my $op_type = substr($curr_values, $off + 1, 1);
+
+                               # Get the full operator variant.
+                               my $opv = $op . substr($curr_vars, $off, 1);
+
+                               # Ignore operators passed as parameters.
+                               if ($op_type ne 'V' &&
+                                   $ca =~ /\s$/ && $cc =~ /^\s*[,\)]/) {
+
+#                              # Ignore comments
+#                              } elsif ($op =~ /^$;+$/) {
+
+                               # ; should have either the end of line or a space or \ after it
+                               } elsif ($op eq ';') {
+                                       if ($ctx !~ /.x[WEBC]/ &&
+                                           $cc !~ /^\\/ && $cc !~ /^;/) {
+                                               if (ERROR("SPACING",
+                                                         "space required after that '$op' $at\n" . $hereptr)) {
+                                                       $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # // is a comment
+                               } elsif ($op eq '//') {
+
+                               #   :   when part of a bitfield
+                               } elsif ($opv eq ':B') {
+                                       # skip the bitfield test for now
+
+                               # No spaces for:
+                               #   ->
+                               } elsif ($op eq '->') {
+                                       if ($ctx =~ /Wx.|.xW/) {
+                                               if (ERROR("SPACING",
+                                                         "spaces prohibited around that '$op' $at\n" . $hereptr)) {
+                                                       $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+                                                       if (defined $fix_elements[$n + 2]) {
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                       }
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # , must not have a space before and must have a space on the right.
+                               } elsif ($op eq ',') {
+                                       my $rtrim_before = 0;
+                                       my $space_after = 0;
+                                       if ($ctx =~ /Wx./) {
+                                               if (ERROR("SPACING",
+                                                         "space prohibited before that '$op' $at\n" . $hereptr)) {
+                                                       $line_fixed = 1;
+                                                       $rtrim_before = 1;
+                                               }
+                                       }
+                                       if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
+                                               if (ERROR("SPACING",
+                                                         "space required after that '$op' $at\n" . $hereptr)) {
+                                                       $line_fixed = 1;
+                                                       $last_after = $n;
+                                                       $space_after = 1;
+                                               }
+                                       }
+                                       if ($rtrim_before || $space_after) {
+                                               if ($rtrim_before) {
+                                                       $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+                                               } else {
+                                                       $good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
+                                               }
+                                               if ($space_after) {
+                                                       $good .= " ";
+                                               }
+                                       }
+
+                               # '*' as part of a type definition -- reported already.
+                               } elsif ($opv eq '*_') {
+                                       #warn "'*' is part of type\n";
+
+                               # unary operators should have a space before and
+                               # none after.  May be left adjacent to another
+                               # unary operator, or a cast
+                               } elsif ($op eq '!' || $op eq '~' ||
+                                        $opv eq '*U' || $opv eq '-U' ||
+                                        $opv eq '&U' || $opv eq '&&U') {
+                                       if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
+                                               if (ERROR("SPACING",
+                                                         "space required before that '$op' $at\n" . $hereptr)) {
+                                                       if ($n != $last_after + 2) {
+                                                               $good = $fix_elements[$n] . " " . ltrim($fix_elements[$n + 1]);
+                                                               $line_fixed = 1;
+                                                       }
+                                               }
+                                       }
+                                       if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
+                                               # A unary '*' may be const
+
+                                       } elsif ($ctx =~ /.xW/) {
+                                               if (ERROR("SPACING",
+                                                         "space prohibited after that '$op' $at\n" . $hereptr)) {
+                                                       $good = $fix_elements[$n] . rtrim($fix_elements[$n + 1]);
+                                                       if (defined $fix_elements[$n + 2]) {
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                       }
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # unary ++ and unary -- are allowed no space on one side.
+                               } elsif ($op eq '++' or $op eq '--') {
+                                       if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
+                                               if (ERROR("SPACING",
+                                                         "space required one side of that '$op' $at\n" . $hereptr)) {
+                                                       $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+                                       if ($ctx =~ /Wx[BE]/ ||
+                                           ($ctx =~ /Wx./ && $cc =~ /^;/)) {
+                                               if (ERROR("SPACING",
+                                                         "space prohibited before that '$op' $at\n" . $hereptr)) {
+                                                       $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+                                       if ($ctx =~ /ExW/) {
+                                               if (ERROR("SPACING",
+                                                         "space prohibited after that '$op' $at\n" . $hereptr)) {
+                                                       $good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
+                                                       if (defined $fix_elements[$n + 2]) {
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                       }
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # << and >> may either have or not have spaces on both sides
+                               } elsif ($op eq '<<' or $op eq '>>' or
+                                        $op eq '&' or $op eq '^' or $op eq '|' or
+                                        $op eq '+' or $op eq '-' or
+                                        $op eq '*' or $op eq '/' or
+                                        $op eq '%')
+                               {
+                                       if ($check) {
+                                               if (defined $fix_elements[$n + 2] && $ctx !~ /[EW]x[EW]/) {
+                                                       if (CHK("SPACING",
+                                                               "spaces preferred around that '$op' $at\n" . $hereptr)) {
+                                                               $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                               $line_fixed = 1;
+                                                       }
+                                               } elsif (!defined $fix_elements[$n + 2] && $ctx !~ /Wx[OE]/) {
+                                                       if (CHK("SPACING",
+                                                               "space preferred before that '$op' $at\n" . $hereptr)) {
+                                                               $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]);
+                                                               $line_fixed = 1;
+                                                       }
+                                               }
+                                       } elsif ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
+                                               if (ERROR("SPACING",
+                                                         "need consistent spacing around '$op' $at\n" . $hereptr)) {
+                                                       $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+                                                       if (defined $fix_elements[$n + 2]) {
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                       }
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # A colon needs no spaces before when it is
+                               # terminating a case value or a label.
+                               } elsif ($opv eq ':C' || $opv eq ':L') {
+                                       if ($ctx =~ /Wx./) {
+                                               if (ERROR("SPACING",
+                                                         "space prohibited before that '$op' $at\n" . $hereptr)) {
+                                                       $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+
+                               # All the others need spaces both sides.
+                               } elsif ($ctx !~ /[EWC]x[CWE]/) {
+                                       my $ok = 0;
+
+                                       # Ignore email addresses <foo@bar>
+                                       if (($op eq '<' &&
+                                            $cc =~ /^\S+\@\S+>/) ||
+                                           ($op eq '>' &&
+                                            $ca =~ /<\S+\@\S+$/))
+                                       {
+                                               $ok = 1;
+                                       }
+
+                                       # for asm volatile statements
+                                       # ignore a colon with another
+                                       # colon immediately before or after
+                                       if (($op eq ':') &&
+                                           ($ca =~ /:$/ || $cc =~ /^:/)) {
+                                               $ok = 1;
+                                       }
+
+                                       # messages are ERROR, but ?: are CHK
+                                       if ($ok == 0) {
+                                               my $msg_type = \&ERROR;
+                                               $msg_type = \&CHK if (($op eq '?:' || $op eq '?' || $op eq ':') && $ctx =~ /VxV/);
+
+                                               if (&{$msg_type}("SPACING",
+                                                                "spaces required around that '$op' $at\n" . $hereptr)) {
+                                                       $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+                                                       if (defined $fix_elements[$n + 2]) {
+                                                               $fix_elements[$n + 2] =~ s/^\s+//;
+                                                       }
+                                                       $line_fixed = 1;
+                                               }
+                                       }
+                               }
+                               $off += length($elements[$n + 1]);
+
+##                             print("n: <$n> GOOD: <$good>\n");
+
+                               $fixed_line = $fixed_line . $good;
+                       }
+
+                       if (($#elements % 2) == 0) {
+                               $fixed_line = $fixed_line . $fix_elements[$#elements];
+                       }
+
+                       if ($fix && $line_fixed && $fixed_line ne $fixed[$fixlinenr]) {
+                               $fixed[$fixlinenr] = $fixed_line;
+                       }
+
+
+               }
+
+# check for whitespace before a non-naked semicolon
+               if ($line =~ /^\+.*\S\s+;\s*$/) {
+                       if (WARN("SPACING",
+                                "space prohibited before semicolon\n" . $herecurr) &&
+                           $fix) {
+                               1 while $fixed[$fixlinenr] =~
+                                   s/^(\+.*\S)\s+;/$1;/;
+                       }
+               }
+
+# check for multiple assignments
+               if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
+                       CHK("MULTIPLE_ASSIGNMENTS",
+                           "multiple assignments should be avoided\n" . $herecurr);
+               }
+
+## # check for multiple declarations, allowing for a function declaration
+## # continuation.
+##             if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
+##                 $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
+##
+##                     # Remove any bracketed sections to ensure we do not
+##                     # falsely report the parameters of functions.
+##                     my $ln = $line;
+##                     while ($ln =~ s/\([^\(\)]*\)//g) {
+##                     }
+##                     if ($ln =~ /,/) {
+##                             WARN("MULTIPLE_DECLARATION",
+##                                  "declaring multiple variables together should be avoided\n" . $herecurr);
+##                     }
+##             }
+
+#need space before brace following if, while, etc
+               if (($line =~ /\(.*\)\{/ && $line !~ /\($Type\)\{/) ||
+                   $line =~ /do\{/) {
+                       if (ERROR("SPACING",
+                                 "space required before the open brace '{'\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+                       }
+               }
+
+## # check for blank lines before declarations
+##             if ($line =~ /^.\t+$Type\s+$Ident(?:\s*=.*)?;/ &&
+##                 $prevrawline =~ /^.\s*$/) {
+##                     WARN("SPACING",
+##                          "No blank lines before declarations\n" . $hereprev);
+##             }
+##
+
+# closing brace should have a space following it when there is anything
+# else on the line
+               if ($line =~ /}(?!(?:,|;|\)))\S/) {
+                       if (ERROR("SPACING",
+                                 "space required after that close brace '}'\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/}((?!(?:,|;|\)))\S)/} $1/;
+                       }
+               }
+
+# check spacing on square brackets
+               if ($line =~ /\[\s/ && $line !~ /\[\s*$/) {
+                       if (ERROR("SPACING",
+                                 "space prohibited after that open square bracket '['\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\[\s+/\[/;
+                       }
+               }
+               if ($line =~ /\s\]/) {
+                       if (ERROR("SPACING",
+                                 "space prohibited before that close square bracket ']'\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\s+\]/\]/;
+                       }
+               }
+
+# check spacing on parentheses
+               if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
+                   $line !~ /for\s*\(\s+;/) {
+                       if (ERROR("SPACING",
+                                 "space prohibited after that open parenthesis '('\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\(\s+/\(/;
+                       }
+               }
+               if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ &&
+                   $line !~ /for\s*\(.*;\s+\)/ &&
+                   $line !~ /:\s+\)/) {
+                       if (ERROR("SPACING",
+                                 "space prohibited before that close parenthesis ')'\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\s+\)/\)/;
+                       }
+               }
+
+# check unnecessary parentheses around addressof/dereference single $Lvals
+# ie: &(foo->bar) should be &foo->bar and *(foo->bar) should be *foo->bar
+
+               while ($line =~ /(?:[^&]&\s*|\*)\(\s*($Ident\s*(?:$Member\s*)+)\s*\)/g) {
+                       my $var = $1;
+                       if (CHK("UNNECESSARY_PARENTHESES",
+                               "Unnecessary parentheses around $var\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\(\s*\Q$var\E\s*\)/$var/;
+                       }
+               }
+
+# check for unnecessary parentheses around function pointer uses
+# ie: (foo->bar)(); should be foo->bar();
+# but not "if (foo->bar) (" to avoid some false positives
+               if ($line =~ /(\bif\s*|)(\(\s*$Ident\s*(?:$Member\s*)+\))[ \t]*\(/ && $1 !~ /^if/) {
+                       my $var = $2;
+                       if (CHK("UNNECESSARY_PARENTHESES",
+                               "Unnecessary parentheses around function pointer $var\n" . $herecurr) &&
+                           $fix) {
+                               my $var2 = deparenthesize($var);
+                               $var2 =~ s/\s//g;
+                               $fixed[$fixlinenr] =~ s/\Q$var\E/$var2/;
+                       }
+               }
+
+#goto labels aren't indented, allow a single space however
+               if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and
+                  !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) {
+                       if (WARN("INDENTED_LABEL",
+                                "labels should not be indented\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/^(.)\s+/$1/;
+                       }
+               }
+
+# return is not a function
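+# Illustrative example (hypothetical name, not from the original script):
+# "return (ret);" is reported; write "return ret;".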
+               if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) {
+                       my $spacing = $1;
+                       if ($^V && $^V ge 5.10.0 &&
+                           $stat =~ /^.\s*return\s*($balanced_parens)\s*;\s*$/) {
+                               my $value = $1;
+                               $value = deparenthesize($value);
+                               if ($value =~ m/^\s*$FuncArg\s*(?:\?|$)/) {
+                                       ERROR("RETURN_PARENTHESES",
+                                             "return is not a function, parentheses are not required\n" . $herecurr);
+                               }
+                       } elsif ($spacing !~ /\s+/) {
+                               ERROR("SPACING",
+                                     "space required before the open parenthesis '('\n" . $herecurr);
+                       }
+               }
+
+# unnecessary return in a void function
+# at end-of-function, with the previous line a single leading tab, then return;
+# and the line before that not a goto label target like "out:"
+               if ($sline =~ /^[ \+]}\s*$/ &&
+                   $prevline =~ /^\+\treturn\s*;\s*$/ &&
+                   $linenr >= 3 &&
+                   $lines[$linenr - 3] =~ /^[ +]/ &&
+                   $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
+                       WARN("RETURN_VOID",
+                            "void function return statements are not generally useful\n" . $hereprev);
+               }
+
+# if statements using unnecessary parentheses - ie: if ((foo == bar))
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /\bif\s*((?:\(\s*){2,})/) {
+                       my $openparens = $1;
+                       my $count = $openparens =~ tr@\(@\(@;
+                       my $msg = "";
+                       if ($line =~ /\bif\s*(?:\(\s*){$count,$count}$LvalOrFunc\s*($Compare)\s*$LvalOrFunc(?:\s*\)){$count,$count}/) {
+                               my $comp = $4;  #Not $1 because of $LvalOrFunc
+                               $msg = " - maybe == should be = ?" if ($comp eq "==");
+                               WARN("UNNECESSARY_PARENTHESES",
+                                    "Unnecessary parentheses$msg\n" . $herecurr);
+                       }
+               }
+
+# comparisons with a constant or upper case identifier on the left
+#      avoid cases like "foo + BAR < baz"
+#      only fix matches surrounded by parentheses to avoid incorrect
+#      conversions like "FOO < baz() + 5" being "misfixed" to "baz() > FOO + 5"
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /^\+(.*)\b($Constant|[A-Z_][A-Z0-9_]*)\s*($Compare)\s*($LvalOrFunc)/) {
+                       my $lead = $1;
+                       my $const = $2;
+                       my $comp = $3;
+                       my $to = $4;
+                       my $newcomp = $comp;
+                       if ($lead !~ /$Operators\s*$/ &&
+                           $to !~ /^(?:Constant|[A-Z_][A-Z0-9_]*)$/ &&
+                           WARN("CONSTANT_COMPARISON",
+                                "Comparisons should place the constant on the right side of the test\n" . $herecurr) &&
+                           $fix) {
+                               if ($comp eq "<") {
+                                       $newcomp = ">";
+                               } elsif ($comp eq "<=") {
+                                       $newcomp = ">=";
+                               } elsif ($comp eq ">") {
+                                       $newcomp = "<";
+                               } elsif ($comp eq ">=") {
+                                       $newcomp = "<=";
+                               }
+                               $fixed[$fixlinenr] =~ s/\(\s*\Q$const\E\s*$Compare\s*\Q$to\E\s*\)/($to $newcomp $const)/;
+                       }
+               }
+
+# Return of what appears to be an errno should normally be negative
+               if ($sline =~ /\breturn(?:\s*\(+\s*|\s+)(E[A-Z]+)(?:\s*\)+\s*|\s*)[;:,]/) {
+                       my $name = $1;
+                       if ($name ne 'EOF' && $name ne 'ERROR') {
+                               WARN("USE_NEGATIVE_ERRNO",
+                                    "return of an errno should typically be negative (ie: return -$1)\n" . $herecurr);
+                       }
+               }
+
+# Need a space before open parenthesis after if, while etc
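+# eg: "if(foo)" should be "if (foo)"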
+               if ($line =~ /\b(if|while|for|switch)\(/) {
+                       if (ERROR("SPACING",
+                                 "space required before the open parenthesis '('\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/\b(if|while|for|switch)\(/$1 \(/;
+                       }
+               }
+
+# Check for illegal assignment in if conditional -- and check for trailing
+# statements after the conditional.
+               if ($line =~ /do\s*(?!{)/) {
+                       ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+                               ctx_statement_block($linenr, $realcnt, 0)
+                                       if (!defined $stat);
+                       my ($stat_next) = ctx_statement_block($line_nr_next,
+                                               $remain_next, $off_next);
+                       $stat_next =~ s/\n./\n /g;
+                       ##print "stat<$stat> stat_next<$stat_next>\n";
+
+                       if ($stat_next =~ /^\s*while\b/) {
+                               # If the statement carries leading newlines,
+                               # then count those as offsets.
+                               my ($whitespace) =
+                                       ($stat_next =~ /^((?:\s*\n[+-])*\s*)/s);
+                               my $offset =
+                                       statement_rawlines($whitespace) - 1;
+
+                               $suppress_whiletrailers{$line_nr_next +
+                                                               $offset} = 1;
+                       }
+               }
+               if (!defined $suppress_whiletrailers{$linenr} &&
+                   defined($stat) && defined($cond) &&
+                   $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
+                       my ($s, $c) = ($stat, $cond);
+
+                       if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
+                               ERROR("ASSIGN_IN_IF",
+                                     "do not use assignment in if condition\n" . $herecurr);
+                       }
+
+                       # Find out what is on the end of the line after the
+                       # conditional.
+                       substr($s, 0, length($c), '');
+                       $s =~ s/\n.*//g;
+                       $s =~ s/$;//g;  # Remove any comments
+                       if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
+                           $c !~ /}\s*while\s*/)
+                       {
+                               # Find out how long the conditional actually is.
+                               my @newlines = ($c =~ /\n/gs);
+                               my $cond_lines = 1 + $#newlines;
+                               my $stat_real = '';
+
+                               $stat_real = raw_line($linenr, $cond_lines)
+                                                       . "\n" if ($cond_lines);
+                               if (defined($stat_real) && $cond_lines > 1) {
+                                       $stat_real = "[...]\n$stat_real";
+                               }
+
+                               ERROR("TRAILING_STATEMENTS",
+                                     "trailing statements should be on next line\n" . $herecurr . $stat_real);
+                       }
+               }
+
+# Check for bitwise tests written as boolean
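+# eg: "if (flags && 0x10)" was probably meant to be the bitwise test
+# "if (flags & 0x10)"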
+               if ($line =~ /
+                       (?:
+                               (?:\[|\(|\&\&|\|\|)
+                               \s*0[xX][0-9]+\s*
+                               (?:\&\&|\|\|)
+                       |
+                               (?:\&\&|\|\|)
+                               \s*0[xX][0-9]+\s*
+                               (?:\&\&|\|\||\)|\])
+                       )/x)
+               {
+                       WARN("HEXADECIMAL_BOOLEAN_TEST",
+                            "boolean test with hexadecimal, perhaps just 1 \& or \|?\n" . $herecurr);
+               }
+
+# if and else should not have general statements after it
+               if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
+                       my $s = $1;
+                       $s =~ s/$;//g;  # Remove any comments
+                       if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
+                               ERROR("TRAILING_STATEMENTS",
+                                     "trailing statements should be on next line\n" . $herecurr);
+                       }
+               }
+# if should not continue a brace
+               if ($line =~ /}\s*if\b/) {
+                       ERROR("TRAILING_STATEMENTS",
+                             "trailing statements should be on next line (or did you mean 'else if'?)\n" .
+                               $herecurr);
+               }
+# case and default should not have general statements after them
+               if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
+                   $line !~ /\G(?:
+                       (?:\s*$;*)(?:\s*{)?(?:\s*$;*)(?:\s*\\)?\s*$|
+                       \s*return\s+
+                   )/xg)
+               {
+                       ERROR("TRAILING_STATEMENTS",
+                             "trailing statements should be on next line\n" . $herecurr);
+               }
+
+               # Check for }<nl>else {, these must be at the same
+               # indent level to be relevant to each other.
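+               # eg: a lone "}" line followed by "else {" on the next
+               # line should be joined as "} else {"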
+               if ($prevline=~/}\s*$/ and $line=~/^.\s*else\s*/ &&
+                   $previndent == $indent) {
+                       if (ERROR("ELSE_AFTER_BRACE",
+                                 "else should follow close brace '}'\n" . $hereprev) &&
+                           $fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
+                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                               fix_delete_line($fixlinenr, $rawline);
+                               my $fixedline = $prevrawline;
+                               $fixedline =~ s/}\s*$//;
+                               if ($fixedline !~ /^\+\s*$/) {
+                                       fix_insert_line($fixlinenr, $fixedline);
+                               }
+                               $fixedline = $rawline;
+                               $fixedline =~ s/^(.\s*)else/$1} else/;
+                               fix_insert_line($fixlinenr, $fixedline);
+                       }
+               }
+
+               if ($prevline=~/}\s*$/ and $line=~/^.\s*while\s*/ &&
+                   $previndent == $indent) {
+                       my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
+
+                       # Find out what is on the end of the line after the
+                       # conditional.
+                       substr($s, 0, length($c), '');
+                       $s =~ s/\n.*//g;
+
+                       if ($s =~ /^\s*;/) {
+                               if (ERROR("WHILE_AFTER_BRACE",
+                                         "while should follow close brace '}'\n" . $hereprev) &&
+                                   $fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
+                                       fix_delete_line($fixlinenr - 1, $prevrawline);
+                                       fix_delete_line($fixlinenr, $rawline);
+                                       my $fixedline = $prevrawline;
+                                       my $trailing = $rawline;
+                                       $trailing =~ s/^\+//;
+                                       $trailing = trim($trailing);
+                                       $fixedline =~ s/}\s*$/} $trailing/;
+                                       fix_insert_line($fixlinenr, $fixedline);
+                               }
+                       }
+               }
+
+#Specific variable tests
+               while ($line =~ m{($Constant|$Lval)}g) {
+                       my $var = $1;
+
+#gcc binary extension
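+# eg: "0b1010" relies on the gcc extension; with --fix it becomes the
+# equivalent hex constant "0xa"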
+                       if ($var =~ /^$Binary$/) {
+                               if (WARN("GCC_BINARY_CONSTANT",
+                                        "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
+                                   $fix) {
+                                       my $hexval = sprintf("0x%x", oct($var));
+                                       $fixed[$fixlinenr] =~
+                                           s/\b$var\b/$hexval/;
+                               }
+                       }
+
+#CamelCase
+                       if ($var !~ /^$Constant$/ &&
+                           $var =~ /[A-Z][a-z]|[a-z][A-Z]/ &&
+#Ignore Page<foo> variants
+                           $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
+#Ignore SI style variants like nS, mV and dB (ie: max_uV, regulator_min_uA_show)
+                           $var !~ /^(?:[a-z_]*?)_?[a-z][A-Z](?:_[a-z_]+)?$/ &&
+#Ignore some three character SI units explicitly, like MiB and KHz
+                           $var !~ /^(?:[a-z_]*?)_?(?:[KMGT]iB|[KMGT]?Hz)(?:_[a-z_]+)?$/) {
+                               while ($var =~ m{($Ident)}g) {
+                                       my $word = $1;
+                                       next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
+                                       if ($check) {
+                                               seed_camelcase_includes();
+                                               if (!$file && !$camelcase_file_seeded) {
+                                                       seed_camelcase_file($realfile);
+                                                       $camelcase_file_seeded = 1;
+                                               }
+                                       }
+                                       if (!defined $camelcase{$word}) {
+                                               $camelcase{$word} = 1;
+                                               CHK("CAMELCASE",
+                                                   "Avoid CamelCase: <$word>\n" . $herecurr);
+                                       }
+                               }
+                       }
+               }
+
+#no spaces allowed after \ in define
+               if ($line =~ /\#\s*define.*\\\s+$/) {
+                       if (WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
+                                "Whitespace after \\ makes next lines useless\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\s+$//;
+                       }
+               }
+
+# warn if <asm/foo.h> is #included and <linux/foo.h> is available and includes
+# itself <asm/foo.h> (uses RAW line)
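+# eg: in a driver file "#include <asm/io.h>" should normally be
+# "#include <linux/io.h>", as the linux/ header already includes the asm/ one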
+               if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
+                       my $file = "$1.h";
+                       my $checkfile = "include/linux/$file";
+                       if (-f "$root/$checkfile" &&
+                           $realfile ne $checkfile &&
+                           $1 !~ /$allowed_asm_includes/)
+                       {
+                               my $asminclude = `grep -Ec "#include\\s+<asm/$file>" $root/$checkfile`;
+                               if ($asminclude > 0) {
+                                       if ($realfile =~ m{^arch/}) {
+                                               CHK("ARCH_INCLUDE_LINUX",
+                                                   "Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+                                       } else {
+                                               WARN("INCLUDE_LINUX",
+                                                    "Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+                                       }
+                               }
+                       }
+               }
+
+# multi-statement macros should be enclosed in a do while loop, grab the
+# first statement and ensure it's the whole macro if it's not enclosed
+# in a known good container
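+# eg: "#define FOO(x) bar(x); baz(x)" should be written as
+# "#define FOO(x) do { bar(x); baz(x); } while (0)"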
+               if ($realfile !~ m@/vmlinux.lds.h$@ &&
+                   $line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
+                       my $ln = $linenr;
+                       my $cnt = $realcnt;
+                       my ($off, $dstat, $dcond, $rest);
+                       my $ctx = '';
+                       my $has_flow_statement = 0;
+                       my $has_arg_concat = 0;
+                       ($dstat, $dcond, $ln, $cnt, $off) =
+                               ctx_statement_block($linenr, $realcnt, 0);
+                       $ctx = $dstat;
+                       #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
+                       #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
+
+                       $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
+                       $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
+
+                       $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
+                       $dstat =~ s/$;//g;
+                       $dstat =~ s/\\\n.//g;
+                       $dstat =~ s/^\s*//s;
+                       $dstat =~ s/\s*$//s;
+
+                       # Flatten any parentheses and braces
+                       while ($dstat =~ s/\([^\(\)]*\)/1/ ||
+                              $dstat =~ s/\{[^\{\}]*\}/1/ ||
+                              $dstat =~ s/.\[[^\[\]]*\]/1/)
+                       {
+                       }
+
+                       # Flatten any obvious string concatenation.
+                       while ($dstat =~ s/($String)\s*$Ident/$1/ ||
+                              $dstat =~ s/$Ident\s*($String)/$1/)
+                       {
+                       }
+
+                       # Make asm volatile uses seem like a generic function
+                       $dstat =~ s/\b_*asm_*\s+_*volatile_*\b/asm_volatile/g;
+
+                       my $exceptions = qr{
+                               $Declare|
+                               module_param_named|
+                               MODULE_PARM_DESC|
+                               DECLARE_PER_CPU|
+                               DEFINE_PER_CPU|
+                               __typeof__\(|
+                               union|
+                               struct|
+                               \.$Ident\s*=\s*|
+                               ^\"|\"$|
+                               ^\[
+                       }x;
+                       #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+                       if ($dstat ne '' &&
+                           $dstat !~ /^(?:$Ident|-?$Constant),$/ &&                    # 10, // foo(),
+                           $dstat !~ /^(?:$Ident|-?$Constant);$/ &&                    # foo();
+                           $dstat !~ /^[!~-]?(?:$Lval|$Constant)$/ &&          # 10 // foo() // !foo // ~foo // -foo // foo->bar // foo.bar->baz
+                           $dstat !~ /^'X'$/ && $dstat !~ /^'XX'$/ &&                  # character constants
+                           $dstat !~ /$exceptions/ &&
+                           $dstat !~ /^\.$Ident\s*=/ &&                                # .foo =
+                           $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ &&          # stringification #foo
+                           $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&       # do {...} while (...); // do {...} while (...)
+                           $dstat !~ /^for\s*$Constant$/ &&                            # for (...)
+                           $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&   # for (...) bar()
+                           $dstat !~ /^do\s*{/ &&                                      # do {...
+                           $dstat !~ /^\(\{/ &&                                                # ({...
+                           $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
+                       {
+                               $ctx =~ s/\n*$//;
+                               my $herectx = $here . "\n";
+                               my $cnt = statement_rawlines($ctx);
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
+                               }
+
+                               if ($dstat =~ /;/) {
+                                       ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
+                                             "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
+                               } else {
+                                       ERROR("COMPLEX_MACRO",
+                                             "Macros with complex values should be enclosed in parentheses\n" . "$herectx");
+                               }
+                       }
+
+# check for macros with flow control, but without ## concatenation
+# ## concatenation is commonly a macro that defines a function so ignore those
+                       if ($has_flow_statement && !$has_arg_concat) {
+                               my $herectx = $here . "\n";
+                               my $cnt = statement_rawlines($ctx);
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
+                               }
+                               WARN("MACRO_WITH_FLOW_CONTROL",
+                                    "Macros with flow control statements should be avoided\n" . "$herectx");
+                       }
+
+# check for line continuations outside of #defines, preprocessor #, and asm
+
+               } else {
+                       if ($prevline !~ /^..*\\$/ &&
+                           $line !~ /^\+\s*\#.*\\$/ &&         # preprocessor
+                           $line !~ /^\+.*\b(__asm__|asm)\b.*\\$/ &&   # asm
+                           $line =~ /^\+.*\\$/) {
+                               WARN("LINE_CONTINUATIONS",
+                                    "Avoid unnecessary line continuations\n" . $herecurr);
+                       }
+               }
+
+# do {} while (0) macro tests:
+# single-statement macros do not need to be enclosed in a do while (0) loop,
+# macro should not end with a semicolon
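+# eg: "#define FOO() do { bar(); } while (0);" draws both warnings: the
+# single statement does not need the loop, and the trailing ';' should go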
+               if ($^V && $^V ge 5.10.0 &&
+                   $realfile !~ m@/vmlinux.lds.h$@ &&
+                   $line =~ /^.\s*\#\s*define\s+$Ident(\()?/) {
+                       my $ln = $linenr;
+                       my $cnt = $realcnt;
+                       my ($off, $dstat, $dcond, $rest);
+                       my $ctx = '';
+                       ($dstat, $dcond, $ln, $cnt, $off) =
+                               ctx_statement_block($linenr, $realcnt, 0);
+                       $ctx = $dstat;
+
+                       $dstat =~ s/\\\n.//g;
+                       $dstat =~ s/$;/ /g;
+
+                       if ($dstat =~ /^\+\s*#\s*define\s+$Ident\s*${balanced_parens}\s*do\s*{(.*)\s*}\s*while\s*\(\s*0\s*\)\s*([;\s]*)\s*$/) {
+                               my $stmts = $2;
+                               my $semis = $3;
+
+                               $ctx =~ s/\n*$//;
+                               my $cnt = statement_rawlines($ctx);
+                               my $herectx = $here . "\n";
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
+                               }
+
+                               if (($stmts =~ tr/;/;/) == 1 &&
+                                   $stmts !~ /^\s*(if|while|for|switch)\b/) {
+                                       WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
+                                            "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
+                               }
+                               if (defined $semis && $semis ne "") {
+                                       WARN("DO_WHILE_MACRO_WITH_TRAILING_SEMICOLON",
+                                            "do {} while (0) macros should not be semicolon terminated\n" . "$herectx");
+                               }
+                       } elsif ($dstat =~ /^\+\s*#\s*define\s+$Ident.*;\s*$/) {
+                               $ctx =~ s/\n*$//;
+                               my $cnt = statement_rawlines($ctx);
+                               my $herectx = $here . "\n";
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
+                               }
+
+                               WARN("TRAILING_SEMICOLON",
+                                    "macros should not use a trailing semicolon\n" . "$herectx");
+                       }
+               }
+
+# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
+# all assignments may have only one of the following with an assignment:
+#      .
+#      ALIGN(...)
+#      VMLINUX_SYMBOL(...)
+               if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) {
+                       WARN("MISSING_VMLINUX_SYMBOL",
+                            "vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr);
+               }
+
+# check for redundant bracing round if etc
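+# eg: an if/else whose arms are all single statements does not need braces;
+# if only some arms are braced, braces should be used on every arm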
+               if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
+                       my ($level, $endln, @chunks) =
+                               ctx_statement_full($linenr, $realcnt, 1);
+                       #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
+                       #print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n";
+                       if ($#chunks > 0 && $level == 0) {
+                               my @allowed = ();
+                               my $allow = 0;
+                               my $seen = 0;
+                               my $herectx = $here . "\n";
+                               my $ln = $linenr - 1;
+                               for my $chunk (@chunks) {
+                                       my ($cond, $block) = @{$chunk};
+
+                                       # If the condition carries leading newlines, then count those as offsets.
+                                       my ($whitespace) = ($cond =~ /^((?:\s*\n[+-])*\s*)/s);
+                                       my $offset = statement_rawlines($whitespace) - 1;
+
+                                       $allowed[$allow] = 0;
+                                       #print "COND<$cond> whitespace<$whitespace> offset<$offset>\n";
+
+                                       # We have looked at and allowed this specific line.
+                                       $suppress_ifbraces{$ln + $offset} = 1;
+
+                                       $herectx .= "$rawlines[$ln + $offset]\n[...]\n";
+                                       $ln += statement_rawlines($block) - 1;
+
+                                       substr($block, 0, length($cond), '');
+
+                                       $seen++ if ($block =~ /^\s*{/);
+
+                                       #print "cond<$cond> block<$block> allowed<$allowed[$allow]>\n";
+                                       if (statement_lines($cond) > 1) {
+                                               #print "APW: ALLOWED: cond<$cond>\n";
+                                               $allowed[$allow] = 1;
+                                       }
+                                       if ($block =~/\b(?:if|for|while)\b/) {
+                                               #print "APW: ALLOWED: block<$block>\n";
+                                               $allowed[$allow] = 1;
+                                       }
+                                       if (statement_block_size($block) > 1) {
+                                               #print "APW: ALLOWED: lines block<$block>\n";
+                                               $allowed[$allow] = 1;
+                                       }
+                                       $allow++;
+                               }
+                               if ($seen) {
+                                       my $sum_allowed = 0;
+                                       foreach (@allowed) {
+                                               $sum_allowed += $_;
+                                       }
+                                       if ($sum_allowed == 0) {
+                                               WARN("BRACES",
+                                                    "braces {} are not necessary for any arm of this statement\n" . $herectx);
+                                       } elsif ($sum_allowed != $allow &&
+                                                $seen != $allow) {
+                                               CHK("BRACES",
+                                                   "braces {} should be used on all arms of this statement\n" . $herectx);
+                                       }
+                               }
+                       }
+               }
+               if (!defined $suppress_ifbraces{$linenr - 1} &&
+                                       $line =~ /\b(if|while|for|else)\b/) {
+                       my $allowed = 0;
+
+                       # Check the pre-context.
+                       if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
+                               #print "APW: ALLOWED: pre<$1>\n";
+                               $allowed = 1;
+                       }
+
+                       my ($level, $endln, @chunks) =
+                               ctx_statement_full($linenr, $realcnt, $-[0]);
+
+                       # Check the condition.
+                       my ($cond, $block) = @{$chunks[0]};
+                       #print "CHECKING<$linenr> cond<$cond> block<$block>\n";
+                       if (defined $cond) {
+                               substr($block, 0, length($cond), '');
+                       }
+                       if (statement_lines($cond) > 1) {
+                               #print "APW: ALLOWED: cond<$cond>\n";
+                               $allowed = 1;
+                       }
+                       if ($block =~/\b(?:if|for|while)\b/) {
+                               #print "APW: ALLOWED: block<$block>\n";
+                               $allowed = 1;
+                       }
+                       if (statement_block_size($block) > 1) {
+                               #print "APW: ALLOWED: lines block<$block>\n";
+                               $allowed = 1;
+                       }
+                       # Check the post-context.
+                       if (defined $chunks[1]) {
+                               my ($cond, $block) = @{$chunks[1]};
+                               if (defined $cond) {
+                                       substr($block, 0, length($cond), '');
+                               }
+                               if ($block =~ /^\s*\{/) {
+                                       #print "APW: ALLOWED: chunk-1 block<$block>\n";
+                                       $allowed = 1;
+                               }
+                       }
+                       if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) {
+                               my $herectx = $here . "\n";
+                               my $cnt = statement_rawlines($block);
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
+                               }
+
+                               WARN("BRACES",
+                                    "braces {} are not necessary for single statement blocks\n" . $herectx);
+                       }
+               }
+
+# check for unnecessary blank lines around braces
+               if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) {
+                       if (CHK("BRACES",
+                               "Blank lines aren't necessary before a close brace '}'\n" . $hereprev) &&
+                           $fix && $prevrawline =~ /^\+/) {
+                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                       }
+               }
+               if (($rawline =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) {
+                       if (CHK("BRACES",
+                               "Blank lines aren't necessary after an open brace '{'\n" . $hereprev) &&
+                           $fix) {
+                               fix_delete_line($fixlinenr, $rawline);
+                       }
+               }
+
+# no volatiles please
+               my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
+               if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
+                       WARN("VOLATILE",
+                            "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
+               }
+
+# Check for user-visible strings broken across lines, which breaks the ability
+# to grep for the string.  Make exceptions when the previous string ends in a
+# newline (multiple lines in one string constant) or '\t', '\r', ';', or '{'
+# (common in inline assembly) or is an octal \123 or hexadecimal \xaf value
+               if ($line =~ /^\+\s*$String/ &&
+                   $prevline =~ /"\s*$/ &&
+                   $prevrawline !~ /(?:\\(?:[ntr]|[0-7]{1,3}|x[0-9a-fA-F]{1,2})|;\s*|\{\s*)"\s*$/) {
+                       if (WARN("SPLIT_STRING",
+                                "quoted string split across lines\n" . $hereprev) &&
+                                    $fix &&
+                                    $prevrawline =~ /^\+.*"\s*$/ &&
+                                    $last_coalesced_string_linenr != $linenr - 1) {
+                               my $extracted_string = get_quoted_string($line, $rawline);
+                               my $comma_close = "";
+                               if ($rawline =~ /\Q$extracted_string\E(\s*\)\s*;\s*$|\s*,\s*)/) {
+                                       $comma_close = $1;
+                               }
+
+                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                               fix_delete_line($fixlinenr, $rawline);
+                               my $fixedline = $prevrawline;
+                               $fixedline =~ s/"\s*$//;
+                               $fixedline .= substr($extracted_string, 1) . trim($comma_close);
+                               fix_insert_line($fixlinenr - 1, $fixedline);
+                               $fixedline = $rawline;
+                               $fixedline =~ s/\Q$extracted_string\E\Q$comma_close\E//;
+                               if ($fixedline !~ /\+\s*$/) {
+                                       fix_insert_line($fixlinenr, $fixedline);
+                               }
+                               $last_coalesced_string_linenr = $linenr;
+                       }
+               }
+
+# check for missing a space in a string concatenation
+               if ($prevrawline =~ /[^\\]\w"$/ && $rawline =~ /^\+[\t ]+"\w/) {
+                       WARN('MISSING_SPACE',
+                            "break quoted strings at a space character\n" . $hereprev);
+               }
+
+# check for spaces before a quoted newline
+               if ($rawline =~ /^.*\".*\s\\n/) {
+                       if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE",
+                                "unnecessary whitespace before a quoted newline\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/^(\+.*\".*)\s+\\n/$1\\n/;
+                       }
+
+               }
+
+# concatenated string without spaces between elements
+               if ($line =~ /$String[A-Z_]/ || $line =~ /[A-Za-z0-9_]$String/) {
+                       CHK("CONCATENATED_STRING",
+                           "Concatenated strings should use spaces between elements\n" . $herecurr);
+               }
+
+# uncoalesced string fragments
+               if ($line =~ /$String\s*"/) {
+                       WARN("STRING_FRAGMENTS",
+                            "Consecutive strings are generally better as a single string\n" . $herecurr);
+               }
+
+# check for %L{u,d,i} and 0x%[udi] in strings
+               my $string;
+               while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
+                       $string = substr($rawline, $-[1], $+[1] - $-[1]);
+                       $string =~ s/%%/__/g;
+                       if ($string =~ /(?<!%)%[\*\d\.\$]*L[udi]/) {
+                               WARN("PRINTF_L",
+                                    "\%Ld/%Lu are not-standard C, use %lld/%llu\n" . $herecurr);
+                               last;
+                       }
+                       if ($string =~ /0x%[\*\d\.\$\Llzth]*[udi]/) {
+                               ERROR("PRINTF_0xDECIMAL",
+                                     "Prefixing 0x with decimal output is defective\n" . $herecurr);
+                       }
+               }
+
+# check for line continuations in quoted strings with odd counts of "
+               if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) {
+                       WARN("LINE_CONTINUATIONS",
+                            "Avoid line continuations in quoted strings\n" . $herecurr);
+               }
+
+# warn about #if 0
+               if ($line =~ /^.\s*\#\s*if\s+0\b/) {
+                       CHK("REDUNDANT_CODE",
+                           "if this code is redundant consider removing it\n" .
+                               $herecurr);
+               }
+
+# check for needless "if (<foo>) fn(<foo>)" uses
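+# eg: "if (ptr) kfree(ptr);" can simply be "kfree(ptr);" since kfree(NULL)
+# is a no-op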
+               if ($prevline =~ /\bif\s*\(\s*($Lval)\s*\)/) {
+                       my $tested = quotemeta($1);
+                       my $expr = '\s*\(\s*' . $tested . '\s*\)\s*;';
+                       if ($line =~ /\b(kfree|usb_free_urb|debugfs_remove(?:_recursive)?|(?:kmem_cache|mempool|dma_pool)_destroy)$expr/) {
+                               my $func = $1;
+                               if (WARN('NEEDLESS_IF',
+                                        "$func(NULL) is safe and this check is probably not required\n" . $hereprev) &&
+                                   $fix) {
+                                       my $do_fix = 1;
+                                       my $leading_tabs = "";
+                                       my $new_leading_tabs = "";
+                                       if ($lines[$linenr - 2] =~ /^\+(\t*)if\s*\(\s*$tested\s*\)\s*$/) {
+                                               $leading_tabs = $1;
+                                       } else {
+                                               $do_fix = 0;
+                                       }
+                                       if ($lines[$linenr - 1] =~ /^\+(\t+)$func\s*\(\s*$tested\s*\)\s*;\s*$/) {
+                                               $new_leading_tabs = $1;
+                                               if (length($leading_tabs) + 1 ne length($new_leading_tabs)) {
+                                                       $do_fix = 0;
+                                               }
+                                       } else {
+                                               $do_fix = 0;
+                                       }
+                                       if ($do_fix) {
+                                               fix_delete_line($fixlinenr - 1, $prevrawline);
+                                               $fixed[$fixlinenr] =~ s/^\+$new_leading_tabs/\+$leading_tabs/;
+                                       }
+                               }
+                       }
+               }
+
+# check for unnecessary "Out of Memory" messages
+               if ($line =~ /^\+.*\b$logFunctions\s*\(/ &&
+                   $prevline =~ /^[ \+]\s*if\s*\(\s*(\!\s*|NULL\s*==\s*)?($Lval)(\s*==\s*NULL\s*)?\s*\)/ &&
+                   (defined $1 || defined $3) &&
+                   $linenr > 3) {
+                       my $testval = $2;
+                       my $testline = $lines[$linenr - 3];
+
+                       my ($s, $c) = ctx_statement_block($linenr - 3, $realcnt, 0);
+#                      print("line: <$line>\nprevline: <$prevline>\ns: <$s>\nc: <$c>\n\n\n");
+
+                       if ($c =~ /(?:^|\n)[ \+]\s*(?:$Type\s*)?\Q$testval\E\s*=\s*(?:\([^\)]*\)\s*)?\s*(?:devm_)?(?:[kv][czm]alloc(?:_node|_array)?\b|kstrdup|(?:dev_)?alloc_skb)/) {
+                               WARN("OOM_MESSAGE",
+                                    "Possible unnecessary 'out of memory' message\n" . $hereprev);
+                       }
+               }
+
+# check for logging functions with KERN_<LEVEL>
+               if ($line !~ /printk(?:_ratelimited|_once)?\s*\(/ &&
+                   $line =~ /\b$logFunctions\s*\(.*\b(KERN_[A-Z]+)\b/) {
+                       my $level = $1;
+                       if (WARN("UNNECESSARY_KERN_LEVEL",
+                                "Possible unnecessary $level\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\s*$level\s*//;
+                       }
+               }
+
+# check for mask then right shift without parentheses
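+# eg: "x & y >> 2" parses as "x & (y >> 2)"; write "(x & y) >> 2" if the
+# shift of the masked value was intended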
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /$LvalOrFunc\s*\&\s*($LvalOrFunc)\s*>>/ &&
+                   $4 !~ /^\&/) { # $LvalOrFunc may be &foo, ignore if so
+                       WARN("MASK_THEN_SHIFT",
+                            "Possible precedence defect with mask then right shift - may need parentheses\n" . $herecurr);
+               }
+
+# check for pointer comparisons to NULL
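+# eg: "ptr == NULL" could be written "!ptr", and "ptr != NULL" simply "ptr"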
+               if ($^V && $^V ge 5.10.0) {
+                       while ($line =~ /\b$LvalOrFunc\s*(==|\!=)\s*NULL\b/g) {
+                               my $val = $1;
+                               my $equal = "!";
+                               $equal = "" if ($4 eq "!=");
+                               if (CHK("COMPARISON_TO_NULL",
+                                       "Comparison to NULL could be written \"${equal}${val}\"\n" . $herecurr) &&
+                                           $fix) {
+                                       $fixed[$fixlinenr] =~ s/\b\Q$val\E\s*(?:==|\!=)\s*NULL\b/$equal$val/;
+                               }
+                       }
+               }
+
+# check for bad placement of section $InitAttribute (e.g.: __initdata)
+               if ($line =~ /(\b$InitAttribute\b)/) {
+                       my $attr = $1;
+                       if ($line =~ /^\+\s*static\s+(?:const\s+)?(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*[=;]/) {
+                               my $ptr = $1;
+                               my $var = $2;
+                               if ((($ptr =~ /\b(union|struct)\s+$attr\b/ &&
+                                     ERROR("MISPLACED_INIT",
+                                           "$attr should be placed after $var\n" . $herecurr)) ||
+                                    ($ptr !~ /\b(union|struct)\s+$attr\b/ &&
+                                     WARN("MISPLACED_INIT",
+                                          "$attr should be placed after $var\n" . $herecurr))) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/(\bstatic\s+(?:const\s+)?)(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*([=;])\s*/"$1" . trim(string_find_replace($2, "\\s*$attr\\s*", " ")) . " " . trim(string_find_replace($3, "\\s*$attr\\s*", "")) . " $attr" . ("$4" eq ";" ? ";" : " = ")/e;
+                               }
+                       }
+               }
+
+# check for $InitAttributeData (ie: __initdata) with const
+               if ($line =~ /\bconst\b/ && $line =~ /($InitAttributeData)/) {
+                       my $attr = $1;
+                       $attr =~ /($InitAttributePrefix)(.*)/;
+                       my $attr_prefix = $1;
+                       my $attr_type = $2;
+                       if (ERROR("INIT_ATTRIBUTE",
+                                 "Use of const init definition must use ${attr_prefix}initconst\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/$InitAttributeData/${attr_prefix}initconst/;
+                       }
+               }
+
+# check for $InitAttributeConst (ie: __initconst) without const
+               if ($line !~ /\bconst\b/ && $line =~ /($InitAttributeConst)/) {
+                       my $attr = $1;
+                       if (ERROR("INIT_ATTRIBUTE",
+                                 "Use of $attr requires a separate use of const\n" . $herecurr) &&
+                           $fix) {
+                               my $lead = $fixed[$fixlinenr] =~
+                                   /(^\+\s*(?:static\s+))/;
+                               $lead = rtrim($1);
+                               $lead = "$lead " if ($lead !~ /^\+$/);
+                               $lead = "${lead}const ";
+                               $fixed[$fixlinenr] =~ s/(^\+\s*(?:static\s+))/$lead/;
+                       }
+               }
+
+# check for __read_mostly with const non-pointer (should just be const)
+               if ($line =~ /\b__read_mostly\b/ &&
+                   $line =~ /($Type)\s*$Ident/ && $1 !~ /\*\s*$/ && $1 =~ /\bconst\b/) {
+                       if (ERROR("CONST_READ_MOSTLY",
+                                 "Invalid use of __read_mostly with const type\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\s+__read_mostly\b//;
+                       }
+               }
+
+# don't use __constant_<foo> functions outside of include/uapi/
+               if ($realfile !~ m@^include/uapi/@ &&
+                   $line =~ /(__constant_(?:htons|ntohs|[bl]e(?:16|32|64)_to_cpu|cpu_to_[bl]e(?:16|32|64)))\s*\(/) {
+                       my $constant_func = $1;
+                       my $func = $constant_func;
+                       $func =~ s/^__constant_//;
+                       if (WARN("CONSTANT_CONVERSION",
+                                "$constant_func should be $func\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b$constant_func\b/$func/g;
+                       }
+               }
+
+# prefer usleep_range over udelay
+               if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
+                       my $delay = $1;
+                       # ignore udelays < 10, however
+                       if (! ($delay < 10) ) {
+                               CHK("USLEEP_RANGE",
+                                   "usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $herecurr);
+                       }
+                       if ($delay > 2000) {
+                               WARN("LONG_UDELAY",
+                                    "long udelay - prefer mdelay; see arch/arm/include/asm/delay.h\n" . $herecurr);
+                       }
+               }
+
+# warn about unexpectedly long msleep's
+               if ($line =~ /\bmsleep\s*\((\d+)\);/) {
+                       if ($1 < 20) {
+                               WARN("MSLEEP",
+                                    "msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $herecurr);
+                       }
+               }
+
+# check for comparisons of jiffies
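+# eg: "if (jiffies > timeout)" misbehaves when jiffies wraps; prefer
+# "if (time_after(jiffies, timeout))"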
+               if ($line =~ /\bjiffies\s*$Compare|$Compare\s*jiffies\b/) {
+                       WARN("JIFFIES_COMPARISON",
+                            "Comparing jiffies is almost always wrong; prefer time_after, time_before and friends\n" . $herecurr);
+               }
+
+# check for comparisons of get_jiffies_64()
+               if ($line =~ /\bget_jiffies_64\s*\(\s*\)\s*$Compare|$Compare\s*get_jiffies_64\s*\(\s*\)/) {
+                       WARN("JIFFIES_COMPARISON",
+                            "Comparing get_jiffies_64() is almost always wrong; prefer time_after64, time_before64 and friends\n" . $herecurr);
+               }
+
+# warn about #ifdefs in C files
+#              if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
+#                      print "#ifdef in C files should be avoided\n";
+#                      print "$herecurr";
+#                      $clean = 0;
+#              }
+
+# warn about spacing in #ifdefs
+               if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
+                       if (ERROR("SPACING",
+                                 "exactly one space required after that #$1\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~
+                                   s/^(.\s*\#\s*(ifdef|ifndef|elif))\s{2,}/$1 /;
+                       }
+
+               }
+
+# check for spinlock_t definitions without a comment.
+               if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
+                   $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
+                       my $which = $1;
+                       if (!ctx_has_comment($first_line, $linenr)) {
+                               CHK("UNCOMMENTED_DEFINITION",
+                                   "$1 definition without comment\n" . $herecurr);
+                       }
+               }
+# check for memory barriers without a comment.
+
+               my $barriers = qr{
+                       mb|
+                       rmb|
+                       wmb|
+                       read_barrier_depends
+               }x;
+               my $barrier_stems = qr{
+                       mb__before_atomic|
+                       mb__after_atomic|
+                       store_release|
+                       load_acquire|
+                       store_mb|
+                       (?:$barriers)
+               }x;
+               my $all_barriers = qr{
+                       (?:$barriers)|
+                       smp_(?:$barrier_stems)|
+                       virt_(?:$barrier_stems)
+               }x;
+
+               if ($line =~ /\b(?:$all_barriers)\s*\(/) {
+                       if (!ctx_has_comment($first_line, $linenr)) {
+                               WARN("MEMORY_BARRIER",
+                                    "memory barrier without comment\n" . $herecurr);
+                       }
+               }
+
+               my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x;
+
+               if ($realfile !~ m@^include/asm-generic/@ &&
+                   $realfile !~ m@/barrier\.h$@ &&
+                   $line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ &&
+                   $line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) {
+                       WARN("MEMORY_BARRIER",
+                            "__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr);
+               }
+
+# check for waitqueue_active without a comment.
+               if ($line =~ /\bwaitqueue_active\s*\(/) {
+                       if (!ctx_has_comment($first_line, $linenr)) {
+                               WARN("WAITQUEUE_ACTIVE",
+                                    "waitqueue_active without comment\n" . $herecurr);
+                       }
+               }
+
+# Check for expedited grace periods that interrupt non-idle non-nohz
+# online CPUs.  These expedited grace periods can therefore degrade real-time response
+# if used carelessly, and should be avoided where not absolutely
+# needed.  It is always OK to use synchronize_rcu_expedited() and
+# synchronize_sched_expedited() at boot time (before real-time applications
+# start) and in error situations where real-time response is compromised in
+# any case.  Note that synchronize_srcu_expedited() does -not- interrupt
+# other CPUs, so don't warn on uses of synchronize_srcu_expedited().
+# Of course, nothing comes for free, and srcu_read_lock() and
+# srcu_read_unlock() do contain full memory barriers in payment for
+# synchronize_srcu_expedited() non-interruption properties.
+               if ($line =~ /\b(synchronize_rcu_expedited|synchronize_sched_expedited)\(/) {
+                       WARN("EXPEDITED_RCU_GRACE_PERIOD",
+                            "expedited RCU grace periods should be avoided where they can degrade real-time response\n" . $herecurr);
+
+               }
+
+# check for hardware-specific defines
+               if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
+                       CHK("ARCH_DEFINES",
+                           "architecture specific defines should be avoided\n" .  $herecurr);
+               }
+
+# Check that the storage class is at the beginning of a declaration
+               if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) {
+                       WARN("STORAGE_CLASS",
+                            "storage class should be at the beginning of the declaration\n" . $herecurr)
+               }
+
+# check the location of the inline attribute, that it is between
+# storage class and type.
+               if ($line =~ /\b$Type\s+$Inline\b/ ||
+                   $line =~ /\b$Inline\s+$Storage\b/) {
+                       ERROR("INLINE_LOCATION",
+                             "inline keyword should sit between storage class and type\n" . $herecurr);
+               }
+
+# Check for __inline__ and __inline, prefer inline
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b(__inline__|__inline)\b/) {
+                       if (WARN("INLINE",
+                                "plain inline is preferred over $1\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b(__inline__|__inline)\b/inline/;
+
+                       }
+               }
+
+# Check for __attribute__ packed, prefer __packed
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
+                       WARN("PREFER_PACKED",
+                            "__packed is preferred over __attribute__((packed))\n" . $herecurr);
+               }
+
+# Check for __attribute__ aligned, prefer __aligned
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b__attribute__\s*\(\s*\(.*aligned/) {
+                       WARN("PREFER_ALIGNED",
+                            "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
+               }
+
+# Check for __attribute__ format(printf, prefer __printf
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
+                       if (WARN("PREFER_PRINTF",
+                                "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf\s*,\s*(.*)\)\s*\)\s*\)/"__printf(" . trim($1) . ")"/ex;
+
+                       }
+               }
+
+# Check for __attribute__ format(scanf, prefer __scanf
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
+                       if (WARN("PREFER_SCANF",
+                                "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\s*,\s*(.*)\)\s*\)\s*\)/"__scanf(" . trim($1) . ")"/ex;
+                       }
+               }
+
+# Check for __attribute__ weak, or __weak declarations (may have link issues)
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /(?:$Declare|$DeclareMisordered)\s*$Ident\s*$balanced_parens\s*(?:$Attribute)?\s*;/ &&
+                   ($line =~ /\b__attribute__\s*\(\s*\(.*\bweak\b/ ||
+                    $line =~ /\b__weak\b/)) {
+                       ERROR("WEAK_DECLARATION",
+                             "Using weak declarations can have unintended link defects\n" . $herecurr);
+               }
+
+# check for c99 types like uint8_t used outside of uapi/
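+# e.g. uint32_t maps to the kernel type u32 and int16_t to s16; uapi headers
+# are exempt since they are exported to userspace.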
+               if ($realfile !~ m@\binclude/uapi/@ &&
+                   $line =~ /\b($Declare)\s*$Ident\s*[=;,\[]/) {
+                       my $type = $1;
+                       if ($type =~ /\b($typeC99Typedefs)\b/) {
+                               $type = $1;
+                               my $kernel_type = 'u';
+                               $kernel_type = 's' if ($type =~ /^_*[si]/);
+                               $type =~ /(\d+)/;
+                               $kernel_type .= $1;
+                               if (CHK("PREFER_KERNEL_TYPES",
+                                       "Prefer kernel type '$kernel_type' over '$type'\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\b$type\b/$kernel_type/;
+                               }
+                       }
+               }
+
+# check for cast of C90 native int or longer types constants
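+# e.g. (unsigned long)0 is flagged; with --fix it becomes the suffixed constant 0UL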
+               if ($line =~ /(\(\s*$C90_int_types\s*\)\s*)($Constant)\b/) {
+                       my $cast = $1;
+                       my $const = $2;
+                       if (WARN("TYPECAST_INT_CONSTANT",
+                                "Unnecessary typecast of c90 int constant\n" . $herecurr) &&
+                           $fix) {
+                               my $suffix = "";
+                               my $newconst = $const;
+                               $newconst =~ s/${Int_type}$//;
+                               $suffix .= 'U' if ($cast =~ /\bunsigned\b/);
+                               if ($cast =~ /\blong\s+long\b/) {
+                                       $suffix .= 'LL';
+                               } elsif ($cast =~ /\blong\b/) {
+                                       $suffix .= 'L';
+                               }
+                               $fixed[$fixlinenr] =~ s/\Q$cast\E$const\b/$newconst$suffix/;
+                       }
+               }
+
+# check for sizeof(&)
+               if ($line =~ /\bsizeof\s*\(\s*\&/) {
+                       WARN("SIZEOF_ADDRESS",
+                            "sizeof(& should be avoided\n" . $herecurr);
+               }
+
+# check for sizeof without parenthesis
+               if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
+                       if (WARN("SIZEOF_PARENTHESIS",
+                                "sizeof $1 should be sizeof($1)\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/"sizeof(" . trim($1) . ")"/ex;
+                       }
+               }
+
+# check for struct spinlock declarations
+               if ($line =~ /^.\s*\bstruct\s+spinlock\s+\w+\s*;/) {
+                       WARN("USE_SPINLOCK_T",
+                            "struct spinlock should be spinlock_t\n" . $herecurr);
+               }
+
+# check for seq_printf uses that could be seq_puts
+               if ($sline =~ /\bseq_printf\s*\(.*"\s*\)\s*;\s*$/) {
+                       my $fmt = get_quoted_string($line, $rawline);
+                       $fmt =~ s/%%//g;
+                       if ($fmt !~ /%/) {
+                               if (WARN("PREFER_SEQ_PUTS",
+                                        "Prefer seq_puts to seq_printf\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\bseq_printf\b/seq_puts/;
+                               }
+                       }
+               }
+
+# Check for misused memsets
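+# a 3rd argument of 0 or 1 usually means the value and length arguments were
+# swapped, e.g. memset(foo, sizeof(foo), 0)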
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*$FuncArg\s*\)/) {
+
+                       my $ms_addr = $2;
+                       my $ms_val = $7;
+                       my $ms_size = $12;
+
+                       if ($ms_size =~ /^(0x|)0$/i) {
+                               ERROR("MEMSET",
+                                     "memset to 0's uses 0 as the 2nd argument, not the 3rd\n" . "$here\n$stat\n");
+                       } elsif ($ms_size =~ /^(0x|)1$/i) {
+                               WARN("MEMSET",
+                                    "single byte memset is suspicious. Swapped 2nd/3rd argument?\n" . "$here\n$stat\n");
+                       }
+               }
+
+# Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar)
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+                       if (WARN("PREFER_ETHER_ADDR_COPY",
+                                "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
+                       }
+               }
+
+# Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar)
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+                       WARN("PREFER_ETHER_ADDR_EQUAL",
+                            "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n");
+               }
+
+# check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr
+# check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+
+                       my $ms_val = $7;
+
+                       if ($ms_val =~ /^(?:0x|)0+$/i) {
+                               if (WARN("PREFER_ETH_ZERO_ADDR",
+                                        "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
+                               }
+                       } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
+                               if (WARN("PREFER_ETH_BROADCAST_ADDR",
+                                        "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
+                               }
+                       }
+               }
+
+# typecasts on min/max could be min_t/max_t
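+# e.g. min((u32)len, max) is better written as min_t(u32, len, max)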
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\b(min|max)\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\)/) {
+                       if (defined $2 || defined $7) {
+                               my $call = $1;
+                               my $cast1 = deparenthesize($2);
+                               my $arg1 = $3;
+                               my $cast2 = deparenthesize($7);
+                               my $arg2 = $8;
+                               my $cast;
+
+                               if ($cast1 ne "" && $cast2 ne "" && $cast1 ne $cast2) {
+                                       $cast = "$cast1 or $cast2";
+                               } elsif ($cast1 ne "") {
+                                       $cast = $cast1;
+                               } else {
+                                       $cast = $cast2;
+                               }
+                               WARN("MINMAX",
+                                    "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . "$here\n$stat\n");
+                       }
+               }
+
+# check usleep_range arguments
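+# usleep_range(min, max) needs min < max; equal or reversed arguments defeat
+# the point of giving the timer code a range of wakeup times to choose from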
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+(?:.*?)\busleep_range\s*\(\s*($FuncArg)\s*,\s*($FuncArg)\s*\)/) {
+                       my $min = $1;
+                       my $max = $7;
+                       if ($min eq $max) {
+                               WARN("USLEEP_RANGE",
+                                    "usleep_range should not use min == max args; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
+                       } elsif ($min =~ /^\d+$/ && $max =~ /^\d+$/ &&
+                                $min > $max) {
+                               WARN("USLEEP_RANGE",
+                                    "usleep_range args reversed, use min then max; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
+                       }
+               }
+
+# check for naked sscanf
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $line =~ /\bsscanf\b/ &&
+                   ($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ &&
+                    $stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ &&
+                    $stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) {
+                       my $lc = $stat =~ tr@\n@@;
+                       $lc = $lc + $linenr;
+                       my $stat_real = raw_line($linenr, 0);
+                       for (my $count = $linenr + 1; $count <= $lc; $count++) {
+                               $stat_real = $stat_real . "\n" . raw_line($count, 0);
+                       }
+                       WARN("NAKED_SSCANF",
+                            "unchecked sscanf return value\n" . "$here\n$stat_real\n");
+               }
+
+# check for simple sscanf that should be kstrto<foo>
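+# e.g. sscanf(buf, "%u", &val) with a single integer conversion is better
+# written as kstrtouint(buf, 0, &val), which also reports overflow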
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $line =~ /\bsscanf\b/) {
+                       my $lc = $stat =~ tr@\n@@;
+                       $lc = $lc + $linenr;
+                       my $stat_real = raw_line($linenr, 0);
+                       for (my $count = $linenr + 1; $count <= $lc; $count++) {
+                               $stat_real = $stat_real . "\n" . raw_line($count, 0);
+                       }
+                       if ($stat_real =~ /\bsscanf\b\s*\(\s*$FuncArg\s*,\s*("[^"]+")/) {
+                               my $format = $6;
+                               my $count = $format =~ tr@%@%@;
+                               if ($count == 1 &&
+                                   $format =~ /^"\%(?i:ll[udxi]|[udxi]ll|ll|[hl]h?[udxi]|[udxi][hl]h?|[hl]h?|[udxi])"$/) {
+                                       WARN("SSCANF_TO_KSTRTO",
+                                            "Prefer kstrto<type> to single variable sscanf\n" . "$here\n$stat_real\n");
+                               }
+                       }
+               }
+
+# check for new externs in .h files.
+               if ($realfile =~ /\.h$/ &&
+                   $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
+                       if (CHK("AVOID_EXTERNS",
+                               "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
+                       }
+               }
+
+# check for new externs in .c files.
+               if ($realfile =~ /\.c$/ && defined $stat &&
+                   $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
+               {
+                       my $function_name = $1;
+                       my $paren_space = $2;
+
+                       my $s = $stat;
+                       if (defined $cond) {
+                               substr($s, 0, length($cond), '');
+                       }
+                       if ($s =~ /^\s*;/ &&
+                           $function_name ne 'uninitialized_var')
+                       {
+                               WARN("AVOID_EXTERNS",
+                                    "externs should be avoided in .c files\n" .  $herecurr);
+                       }
+
+                       if ($paren_space =~ /\n/) {
+                               WARN("FUNCTION_ARGUMENTS",
+                                    "arguments for function declarations should follow identifier\n" . $herecurr);
+                       }
+
+               } elsif ($realfile =~ /\.c$/ && defined $stat &&
+                   $stat =~ /^.\s*extern\s+/)
+               {
+                       WARN("AVOID_EXTERNS",
+                            "externs should be avoided in .c files\n" .  $herecurr);
+               }
+
+# checks for new __setup's
+               if ($rawline =~ /\b__setup\("([^"]*)"/) {
+                       my $name = $1;
+
+                       if (!grep(/$name/, @setup_docs)) {
+                               CHK("UNDOCUMENTED_SETUP",
+                                   "__setup appears un-documented -- check Documentation/kernel-parameters.txt\n" . $herecurr);
+                       }
+               }
+
+# check for pointless casting of kmalloc return
+               if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) {
+                       WARN("UNNECESSARY_CASTS",
+                            "unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
+               }
+
+# alloc style
+# p = alloc(sizeof(struct foo), ...) should be p = alloc(sizeof(*p), ...)
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*([kv][mz]alloc(?:_node)?)\s*\(\s*(sizeof\s*\(\s*struct\s+$Lval\s*\))/) {
+                       CHK("ALLOC_SIZEOF_STRUCT",
+                           "Prefer $3(sizeof(*$1)...) over $3($4...)\n" . $herecurr);
+               }
+
+# check for k[mz]alloc with multiplies that could be kmalloc_array/kcalloc
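+# e.g. kzalloc(count * sizeof(*p), GFP_KERNEL) is better written as
+# kcalloc(count, sizeof(*p), GFP_KERNEL), which checks the multiply for overflow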
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) {
+                       my $oldfunc = $3;
+                       my $a1 = $4;
+                       my $a2 = $10;
+                       my $newfunc = "kmalloc_array";
+                       $newfunc = "kcalloc" if ($oldfunc eq "kzalloc");
+                       my $r1 = $a1;
+                       my $r2 = $a2;
+                       if ($a1 =~ /^sizeof\s*\S/) {
+                               $r1 = $a2;
+                               $r2 = $a1;
+                       }
+                       if ($r1 !~ /^sizeof\b/ && $r2 =~ /^sizeof\s*\S/ &&
+                           !($r1 =~ /^$Constant$/ || $r1 =~ /^[A-Z_][A-Z0-9_]*$/)) {
+                               if (WARN("ALLOC_WITH_MULTIPLY",
+                                        "Prefer $newfunc over $oldfunc with multiply\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)/$1 . ' = ' . "$newfunc(" . trim($r1) . ', ' . trim($r2)/e;
+
+                               }
+                       }
+               }
+
+# check for krealloc arg reuse
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*krealloc\s*\(\s*\1\s*,/) {
+                       WARN("KREALLOC_ARG_REUSE",
+                            "Reusing the krealloc arg is almost always a bug\n" . $herecurr);
+               }
+
+# check for alloc argument mismatch
+               if ($line =~ /\b(kcalloc|kmalloc_array)\s*\(\s*sizeof\b/) {
+                       WARN("ALLOC_ARRAY_ARGS",
+                            "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr);
+               }
+
+# check for multiple semicolons
+               if ($line =~ /;\s*;\s*$/) {
+                       if (WARN("ONE_SEMICOLON",
+                                "Statement terminations use 1 semicolon\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/(\s*;\s*){2,}$/;/g;
+                       }
+               }
+
+# check for #defines like: 1 << <digit> that could be BIT(digit)
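+# e.g. #define FOO_EN (1 << 4) is better written as #define FOO_EN BIT(4);
+# long long variants map to BIT_ULL()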
+               if ($line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) {
+                       my $ull = "";
+                       $ull = "_ULL" if (defined($1) && $1 =~ /ll/i);
+                       if (CHK("BIT_MACRO",
+                               "Prefer using the BIT$ull macro\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\(?\s*1\s*[ulUL]*\s*<<\s*(\d+|$Ident)\s*\)?/BIT${ull}($1)/;
+                       }
+               }
+
+# check for case / default statements not preceded by break/fallthrough/switch
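+# walk backwards (up to three statements, or the whole hunk with --file) and
+# warn only if real statements precede this label with no break/return/goto/
+# continue, no switch, and no "fall through" style comment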
+               if ($line =~ /^.\s*(?:case\s+(?:$Ident|$Constant)\s*|default):/) {
+                       my $has_break = 0;
+                       my $has_statement = 0;
+                       my $count = 0;
+                       my $prevline = $linenr;
+                       while ($prevline > 1 && ($file || $count < 3) && !$has_break) {
+                               $prevline--;
+                               my $rline = $rawlines[$prevline - 1];
+                               my $fline = $lines[$prevline - 1];
+                               last if ($fline =~ /^\@\@/);
+                               next if ($fline =~ /^\-/);
+                               next if ($fline =~ /^.(?:\s*(?:case\s+(?:$Ident|$Constant)[\s$;]*|default):[\s$;]*)*$/);
+                               $has_break = 1 if ($rline =~ /fall[\s_-]*(through|thru)/i);
+                               next if ($fline =~ /^.[\s$;]*$/);
+                               $has_statement = 1;
+                               $count++;
+                               $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|return\b|goto\b|continue\b)/);
+                       }
+                       if (!$has_break && $has_statement) {
+                               WARN("MISSING_BREAK",
+                                    "Possible switch case/default not preceded by break or fallthrough comment\n" . $herecurr);
+                       }
+               }
+
+# check for switch/default statements without a break;
+               if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
+                   $stat =~ /^\+[$;\s]*(?:case[$;\s]+\w+[$;\s]*:[$;\s]*|)*[$;\s]*\bdefault[$;\s]*:[$;\s]*;/g) {
+                       my $ctx = '';
+                       my $herectx = $here . "\n";
+                       my $cnt = statement_rawlines($stat);
+                       for (my $n = 0; $n < $cnt; $n++) {
+                               $herectx .= raw_line($linenr, $n) . "\n";
+                       }
+                       WARN("DEFAULT_NO_BREAK",
+                            "switch default: should use break\n" . $herectx);
+               }
+
+# check for gcc specific __FUNCTION__
+               if ($line =~ /\b__FUNCTION__\b/) {
+                       if (WARN("USE_FUNC",
+                                "__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/\b__FUNCTION__\b/__func__/g;
+                       }
+               }
+
+# check for uses of __DATE__, __TIME__, __TIMESTAMP__
+               while ($line =~ /\b(__(?:DATE|TIME|TIMESTAMP)__)\b/g) {
+                       ERROR("DATE_TIME",
+                             "Use of the '$1' macro makes the build non-deterministic\n" . $herecurr);
+               }
+
+# check for use of yield()
+               if ($line =~ /\byield\s*\(\s*\)/) {
+                       WARN("YIELD",
+                            "Using yield() is generally wrong. See yield() kernel-doc (sched/core.c)\n"  . $herecurr);
+               }
+
+# check for comparisons against true and false
+               if ($line =~ /\+\s*(.*?)\b(true|false|$Lval)\s*(==|\!=)\s*(true|false|$Lval)\b(.*)$/i) {
+                       my $lead = $1;
+                       my $arg = $2;
+                       my $test = $3;
+                       my $otype = $4;
+                       my $trail = $5;
+                       my $op = "!";
+
+                       ($arg, $otype) = ($otype, $arg) if ($arg =~ /^(?:true|false)$/i);
+
+                       my $type = lc($otype);
+                       if ($type =~ /^(?:true|false)$/) {
+                               if (("$test" eq "==" && "$type" eq "true") ||
+                                   ("$test" eq "!=" && "$type" eq "false")) {
+                                       $op = "";
+                               }
+
+                               CHK("BOOL_COMPARISON",
+                                   "Using comparison to $otype is error prone\n" . $herecurr);
+
+## maybe suggesting a correct construct would be better
+##                                 "Using comparison to $otype is error prone.  Perhaps use '${lead}${op}${arg}${trail}'\n" . $herecurr);
+
+                       }
+               }
+
+# check for semaphores initialized locked
+               if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
+                       WARN("CONSIDER_COMPLETION",
+                            "consider using a completion\n" . $herecurr);
+               }
+
+# recommend kstrto* over simple_strto* and strict_strto*
+               if ($line =~ /\b((simple|strict)_(strto(l|ll|ul|ull)))\s*\(/) {
+                       WARN("CONSIDER_KSTRTO",
+                            "$1 is obsolete, use k$3 instead\n" . $herecurr);
+               }
+
+# check for __initcall(), use device_initcall() explicitly or more appropriate function please
+               if ($line =~ /^.\s*__initcall\s*\(/) {
+                       WARN("USE_DEVICE_INITCALL",
+                            "please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
+               }
+
+# check for various structs that are normally const (ops, kgdb, device_tree)
+               my $const_structs = qr{
+                               acpi_dock_ops|
+                               address_space_operations|
+                               backlight_ops|
+                               block_device_operations|
+                               dentry_operations|
+                               dev_pm_ops|
+                               dma_map_ops|
+                               extent_io_ops|
+                               file_lock_operations|
+                               file_operations|
+                               hv_ops|
+                               ide_dma_ops|
+                               intel_dvo_dev_ops|
+                               item_operations|
+                               iwl_ops|
+                               kgdb_arch|
+                               kgdb_io|
+                               kset_uevent_ops|
+                               lock_manager_operations|
+                               microcode_ops|
+                               mtrr_ops|
+                               neigh_ops|
+                               nlmsvc_binding|
+                               of_device_id|
+                               pci_raw_ops|
+                               pipe_buf_operations|
+                               platform_hibernation_ops|
+                               platform_suspend_ops|
+                               proto_ops|
+                               rpc_pipe_ops|
+                               seq_operations|
+                               snd_ac97_build_ops|
+                               soc_pcmcia_socket_ops|
+                               stacktrace_ops|
+                               sysfs_ops|
+                               tty_operations|
+                               uart_ops|
+                               usb_mon_operations|
+                               wd_ops}x;
+               if ($line !~ /\bconst\b/ &&
+                   $line =~ /\bstruct\s+($const_structs)\b/) {
+                       WARN("CONST_STRUCT",
+                            "struct $1 should normally be const\n" .
+                               $herecurr);
+               }
+
+# use of NR_CPUS is usually wrong
+# ignore definitions of NR_CPUS and usage to define arrays as likely right
+               if ($line =~ /\bNR_CPUS\b/ &&
+                   $line !~ /^.\s*\s*#\s*if\b.*\bNR_CPUS\b/ &&
+                   $line !~ /^.\s*\s*#\s*define\b.*\bNR_CPUS\b/ &&
+                   $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
+                   $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
+                   $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
+               {
+                       WARN("NR_CPUS",
+                            "usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
+               }
+
+# Use of __ARCH_HAS_<FOO> or ARCH_HAVE_<BAR> is wrong.
+               if ($line =~ /\+\s*#\s*define\s+((?:__)?ARCH_(?:HAS|HAVE)\w*)\b/) {
+                       ERROR("DEFINE_ARCH_HAS",
+                             "#define of '$1' is wrong - use Kconfig variables or standard guards instead\n" . $herecurr);
+               }
+
+# likely/unlikely comparisons similar to "(likely(foo) > 0)"
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /\b((?:un)?likely)\s*\(\s*$FuncArg\s*\)\s*$Compare/) {
+                       WARN("LIKELY_MISUSE",
+                            "Using $1 should generally have parentheses around the comparison\n" . $herecurr);
+               }
+
+# whine mightily about in_atomic
+               if ($line =~ /\bin_atomic\s*\(/) {
+                       if ($realfile =~ m@^drivers/@) {
+                               ERROR("IN_ATOMIC",
+                                     "do not use in_atomic in drivers\n" . $herecurr);
+                       } elsif ($realfile !~ m@^kernel/@) {
+                               WARN("IN_ATOMIC",
+                                    "use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
+                       }
+               }
+
+# check for lockdep_set_novalidate_class
+               if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
+                   $line =~ /__lockdep_no_validate__\s*\)/ ) {
+                       if ($realfile !~ m@^kernel/lockdep@ &&
+                           $realfile !~ m@^include/linux/lockdep@ &&
+                           $realfile !~ m@^drivers/base/core@) {
+                               ERROR("LOCKDEP",
+                                     "lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
+                       }
+               }
+
+               if ($line =~ /debugfs_create_\w+.*\b$mode_perms_world_writable\b/ ||
+                   $line =~ /DEVICE_ATTR.*\b$mode_perms_world_writable\b/) {
+                       WARN("EXPORTED_WORLD_WRITABLE",
+                            "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+               }
+
+# Mode permission misuses where it seems decimal should be octal
+# This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
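+# e.g. a permission argument written as decimal 644 is flagged; it should be
+# the 4 digit octal 0644, and any mode with the other-write bit (02) set is an error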
+               if ($^V && $^V ge 5.10.0 &&
+                   $line =~ /$mode_perms_search/) {
+                       foreach my $entry (@mode_permission_funcs) {
+                               my $func = $entry->[0];
+                               my $arg_pos = $entry->[1];
+
+                               my $skip_args = "";
+                               if ($arg_pos > 1) {
+                                       $arg_pos--;
+                                       $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
+                               }
+                               my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]";
+                               if ($line =~ /$test/) {
+                                       my $val = $1;
+                                       $val = $6 if ($skip_args ne "");
+
+                                       if ($val !~ /^0$/ &&
+                                           (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
+                                            length($val) ne 4)) {
+                                               ERROR("NON_OCTAL_PERMISSIONS",
+                                                     "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
+                                       } elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) {
+                                               ERROR("EXPORTED_WORLD_WRITABLE",
+                                                     "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+                                       }
+                               }
+                       }
+               }
+
+# validate content of MODULE_LICENSE against list from include/linux/module.h
+               if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) {
+                       my $extracted_string = get_quoted_string($line, $rawline);
+                       my $valid_licenses = qr{
+                                               GPL|
+                                               GPL\ v2|
+                                               GPL\ and\ additional\ rights|
+                                               Dual\ BSD/GPL|
+                                               Dual\ MIT/GPL|
+                                               Dual\ MPL/GPL|
+                                               Proprietary
+                                       }x;
+                       if ($extracted_string !~ /^"(?:$valid_licenses)"$/x) {
+                               WARN("MODULE_LICENSE",
+                                    "unknown module license " . $extracted_string . "\n" . $herecurr);
+                       }
+               }
+       }
+
+       # If we have no input at all, then there is nothing to report on
+       # so just keep quiet.
+       if ($#rawlines == -1) {
+               exit(0);
+       }
+
+       # In mailback mode only produce a report in the negative, for
+       # things that appear to be patches.
+       if ($mailback && ($clean == 1 || !$is_patch)) {
+               exit(0);
+       }
+
+       # This is not a patch, and we are in 'no-patch' mode so
+       # just keep quiet.
+       if (!$chk_patch && !$is_patch) {
+               exit(0);
+       }
+
+       if (!$is_patch && $file !~ /cover-letter\.patch$/) {
+               ERROR("NOT_UNIFIED_DIFF",
+                     "Does not appear to be a unified-diff format patch\n");
+       }
+       if ($is_patch && $filename ne '-' && $chk_signoff && $signoff == 0) {
+               ERROR("MISSING_SIGN_OFF",
+                     "Missing Signed-off-by: line(s)\n");
+       }
+
+       print report_dump();
+       if ($summary && !($clean == 1 && $quiet == 1)) {
+               print "$filename " if ($summary_file);
+               print "total: $cnt_error errors, $cnt_warn warnings, " .
+                       (($check)? "$cnt_chk checks, " : "") .
+                       "$cnt_lines lines checked\n";
+       }
+
+       if ($quiet == 0) {
+               # If there were whitespace errors which cleanpatch can fix
+               # then suggest that.
+               if ($rpt_cleaners) {
+                       $rpt_cleaners = 0;
+                       print << "EOM"
+
+NOTE: Whitespace errors detected.
+      You may wish to use scripts/cleanpatch or scripts/cleanfile
+EOM
+               }
+       }
+
+       if ($clean == 0 && $fix &&
+           ("@rawlines" ne "@fixed" ||
+            $#fixed_inserted >= 0 || $#fixed_deleted >= 0)) {
+               my $newfile = $filename;
+               $newfile .= ".EXPERIMENTAL-checkpatch-fixes" if (!$fix_inplace);
+               my $linecount = 0;
+               my $f;
+
+               @fixed = fix_inserted_deleted_lines(\@fixed, \@fixed_inserted, \@fixed_deleted);
+
+               open($f, '>', $newfile)
+                   or die "$P: Can't open $newfile for write\n";
+               foreach my $fixed_line (@fixed) {
+                       $linecount++;
+                       if ($file) {
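+                               # in --file mode the input is a synthesized
+                               # diff against /dev/null, so skip its three
+                               # header lines and strip the leading '+'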
+                               if ($linecount > 3) {
+                                       $fixed_line =~ s/^\+//;
+                                       print $f $fixed_line . "\n";
+                               }
+                       } else {
+                               print $f $fixed_line . "\n";
+                       }
+               }
+               close($f);
+
+               if (!$quiet) {
+                       print << "EOM";
+
+Wrote EXPERIMENTAL --fix correction(s) to '$newfile'
+
+Do _NOT_ trust the results written to this file.
+Do _NOT_ submit these changes without inspecting them for correctness.
+
+This EXPERIMENTAL file is simply a convenience to help rewrite patches.
+No warranties, expressed or implied...
+EOM
+               }
+       }
+
+       if ($quiet == 0) {
+               print "\n";
+               if ($clean == 1) {
+                       print "$vname has no obvious style problems and is ready for submission.\n";
+               } else {
+                       print "$vname has style problems, please review.\n";
+               }
+       }
+       return $clean;
+}
diff --git a/drivers/staging/greybus/scripts/spelling.txt b/drivers/staging/greybus/scripts/spelling.txt
new file mode 100644 (file)
index 0000000..946caf3
--- /dev/null
@@ -0,0 +1,1072 @@
+# Originally from Debian's Lintian tool. Various false positives have been
+# removed, and various additions have been made as they've been discovered
+# in the kernel source.
+#
+# License: GPLv2
+#
+# The format of each line is:
+# mistake||correction
+#
+abandonning||abandoning
+abigious||ambiguous
+abitrate||arbitrate
+abov||above
+abreviated||abbreviated
+absense||absence
+absolut||absolute
+absoulte||absolute
+acccess||access
+acceleratoin||acceleration
+accelleration||acceleration
+accesing||accessing
+accesnt||accent
+accessable||accessible
+accesss||access
+accidentaly||accidentally
+accidentually||accidentally
+accoding||according
+accomodate||accommodate
+accomodates||accommodates
+accordign||according
+accoring||according
+accout||account
+accquire||acquire
+accquired||acquired
+accross||across
+acessable||accessible
+acess||access
+achitecture||architecture
+acient||ancient
+acitions||actions
+acitve||active
+acknowldegement||acknowledgment
+acknowledgement||acknowledgment
+ackowledge||acknowledge
+ackowledged||acknowledged
+acording||according
+activete||activate
+acumulating||accumulating
+adapater||adapter
+addional||additional
+additionaly||additionally
+addres||address
+addreses||addresses
+addresss||address
+aditional||additional
+aditionally||additionally
+aditionaly||additionally
+adminstrative||administrative
+adress||address
+adresses||addresses
+adviced||advised
+afecting||affecting
+agaist||against
+albumns||albums
+alegorical||allegorical
+algorith||algorithm
+algorithmical||algorithmically
+algoritm||algorithm
+algoritms||algorithms
+algorrithm||algorithm
+algorritm||algorithm
+allign||align
+allocatrd||allocated
+allocte||allocate
+allpication||application
+alocate||allocate
+alogirhtms||algorithms
+alogrithm||algorithm
+alot||a lot
+alow||allow
+alows||allows
+altough||although
+alue||value
+ambigious||ambiguous
+amoung||among
+amout||amount
+analysator||analyzer
+ang||and
+anniversery||anniversary
+annoucement||announcement
+anomolies||anomalies
+anomoly||anomaly
+anway||anyway
+aplication||application
+appearence||appearance
+applicaion||application
+appliction||application
+applictions||applications
+appplications||applications
+appropiate||appropriate
+appropriatly||appropriately
+approriate||appropriate
+approriately||appropriately
+apropriate||appropriate
+aquainted||acquainted
+aquired||acquired
+aquisition||acquisition
+arbitary||arbitrary
+architechture||architecture
+arguement||argument
+arguements||arguments
+aritmetic||arithmetic
+arne't||aren't
+arraival||arrival
+artifical||artificial
+artillary||artillery
+asign||assign
+assertation||assertion
+assiged||assigned
+assigment||assignment
+assigments||assignments
+assistent||assistant
+assocation||association
+associcated||associated
+assotiated||associated
+assum||assume
+assumtpion||assumption
+asuming||assuming
+asycronous||asynchronous
+asynchnous||asynchronous
+atomatically||automatically
+atomicly||atomically
+attachement||attachment
+attched||attached
+attemps||attempts
+attruibutes||attributes
+authentification||authentication
+automaticaly||automatically
+automaticly||automatically
+automatize||automate
+automatized||automated
+automatizes||automates
+autonymous||autonomous
+auxillary||auxiliary
+auxilliary||auxiliary
+avaiable||available
+avaible||available
+availabe||available
+availabled||available
+availablity||availability
+availale||available
+availavility||availability
+availble||available
+availiable||available
+avalable||available
+avaliable||available
+aysnc||async
+backgroud||background
+backword||backward
+backwords||backwards
+bahavior||behavior
+bakup||backup
+baloon||balloon
+baloons||balloons
+bandwith||bandwidth
+batery||battery
+beacuse||because
+becasue||because
+becomming||becoming
+becuase||because
+beeing||being
+befor||before
+begining||beginning
+beter||better
+betweeen||between
+bianries||binaries
+bitmast||bitmask
+boardcast||broadcast
+borad||board
+boundry||boundary
+brievely||briefly
+broadcat||broadcast
+cacluated||calculated
+caculation||calculation
+calender||calendar
+calle||called
+calucate||calculate
+calulate||calculate
+cancelation||cancellation
+capabilites||capabilities
+capabitilies||capabilities
+capatibilities||capabilities
+carefuly||carefully
+cariage||carriage
+catagory||category
+cehck||check
+challange||challenge
+challanges||challenges
+chanell||channel
+changable||changeable
+channle||channel
+channnel||channel
+charachter||character
+charachters||characters
+charactor||character
+charater||character
+charaters||characters
+charcter||character
+chcek||check
+chck||check
+checksuming||checksumming
+childern||children
+childs||children
+chiled||child
+chked||checked
+chnage||change
+chnages||changes
+chnnel||channel
+choosen||chosen
+chouse||chose
+circumvernt||circumvent
+claread||cleared
+clared||cleared
+closeing||closing
+clustred||clustered
+collapsable||collapsible
+colorfull||colorful
+comand||command
+comit||commit
+commerical||commercial
+comming||coming
+comminucation||communication
+commited||committed
+commiting||committing
+committ||commit
+commoditiy||commodity
+compability||compatibility
+compaibility||compatibility
+compatability||compatibility
+compatable||compatible
+compatibiliy||compatibility
+compatibilty||compatibility
+compatiblity||compatibility
+competion||completion
+compilant||compliant
+compleatly||completely
+completly||completely
+complient||compliant
+componnents||components
+compres||compress
+compresion||compression
+comression||compression
+comunication||communication
+conbination||combination
+conditionaly||conditionally
+conected||connected
+configuratoin||configuration
+configuraton||configuration
+configuretion||configuration
+conider||consider
+conjuction||conjunction
+connectinos||connections
+connnection||connection
+connnections||connections
+consistancy||consistency
+consistant||consistent
+containes||contains
+containts||contains
+contaisn||contains
+contant||contact
+contence||contents
+continous||continuous
+continously||continuously
+continueing||continuing
+contraints||constraints
+controled||controlled
+controler||controller
+controll||control
+contruction||construction
+contry||country
+convertion||conversion
+convertor||converter
+convienient||convenient
+convinient||convenient
+corected||corrected
+correponding||corresponding
+correponds||corresponds
+correspoding||corresponding
+cotrol||control
+couter||counter
+coutner||counter
+cryptocraphic||cryptographic
+cunter||counter
+curently||currently
+dafault||default
+deafult||default
+deamon||daemon
+decompres||decompress
+decription||description
+defailt||default
+defferred||deferred
+definate||definite
+definately||definitely
+defintion||definition
+defintions||definitions
+defualt||default
+defult||default
+deivce||device
+delared||declared
+delare||declare
+delares||declares
+delaring||declaring
+delemiter||delimiter
+dependancies||dependencies
+dependancy||dependency
+dependant||dependent
+depreacted||deprecated
+depreacte||deprecate
+desactivate||deactivate
+desciptors||descriptors
+descripton||description
+descrition||description
+descritptor||descriptor
+desctiptor||descriptor
+desriptor||descriptor
+desriptors||descriptors
+destory||destroy
+destoryed||destroyed
+destorys||destroys
+destroied||destroyed
+detabase||database
+develope||develop
+developement||development
+developped||developed
+developpement||development
+developper||developer
+developpment||development
+deveolpment||development
+devided||divided
+deviece||device
+diable||disable
+dictionnary||dictionary
+didnt||didn't
+diferent||different
+differrence||difference
+difinition||definition
+diplay||display
+direectly||directly
+disapear||disappear
+disapeared||disappeared
+disappared||disappeared
+disconnet||disconnect
+discontinous||discontinuous
+dispertion||dispersion
+dissapears||disappears
+distiction||distinction
+docuentation||documentation
+documantation||documentation
+documentaion||documentation
+documment||document
+doesnt||doesn't
+dorp||drop
+dosen||doesn
+downlad||download
+downlads||downloads
+druing||during
+dynmaic||dynamic
+easilly||easily
+ecspecially||especially
+edditable||editable
+editting||editing
+efficently||efficiently
+ehther||ether
+eigth||eight
+eletronic||electronic
+enabledi||enabled
+enchanced||enhanced
+encorporating||incorporating
+encrupted||encrypted
+encrypiton||encryption
+endianess||endianness
+enhaced||enhanced
+enlightnment||enlightenment
+enocded||encoded
+enterily||entirely
+enviroiment||environment
+enviroment||environment
+environement||environment
+environent||environment
+eqivalent||equivalent
+equiped||equipped
+equivelant||equivalent
+equivilant||equivalent
+eror||error
+estbalishment||establishment
+etsablishment||establishment
+etsbalishment||establishment
+excecutable||executable
+exceded||exceeded
+excellant||excellent
+existance||existence
+existant||existent
+exixt||exist
+exlcude||exclude
+exlcusive||exclusive
+exmaple||example
+expecially||especially
+explicite||explicit
+explicitely||explicitly
+explict||explicit
+explictly||explicitly
+expresion||expression
+exprimental||experimental
+extened||extended
+extensability||extensibility
+extention||extension
+extracter||extractor
+faild||failed
+faill||fail
+failue||failure
+failuer||failure
+faireness||fairness
+faliure||failure
+familar||familiar
+fatser||faster
+feauture||feature
+feautures||features
+fetaure||feature
+fetaures||features
+fileystem||filesystem
+finanize||finalize
+findn||find
+finilizes||finalizes
+finsih||finish
+flusing||flushing
+folloing||following
+followign||following
+follwing||following
+forseeable||foreseeable
+forse||force
+fortan||fortran
+forwardig||forwarding
+framwork||framework
+frequncy||frequency
+frome||from
+fucntion||function
+fuction||function
+fuctions||functions
+funcion||function
+functionallity||functionality
+functionaly||functionally
+functionnality||functionality
+functonality||functionality
+funtion||function
+funtions||functions
+furthur||further
+futhermore||furthermore
+futrue||future
+gaurenteed||guaranteed
+generiously||generously
+genric||generic
+globel||global
+grabing||grabbing
+grahical||graphical
+grahpical||graphical
+grapic||graphic
+guage||gauge
+guarenteed||guaranteed
+guarentee||guarantee
+halfs||halves
+hander||handler
+handfull||handful
+hanled||handled
+happend||happened
+harware||hardware
+heirarchically||hierarchically
+helpfull||helpful
+hierachy||hierarchy
+hierarchie||hierarchy
+howver||however
+hsould||should
+hypter||hyper
+identidier||identifier
+imblance||imbalance
+immeadiately||immediately
+immedaite||immediate
+immediatelly||immediately
+immediatly||immediately
+immidiate||immediate
+impelentation||implementation
+impementated||implemented
+implemantation||implementation
+implemenation||implementation
+implementaiton||implementation
+implementated||implemented
+implemention||implementation
+implemetation||implementation
+implemntation||implementation
+implentation||implementation
+implmentation||implementation
+implmenting||implementing
+incomming||incoming
+incompatabilities||incompatibilities
+incompatable||incompatible
+inconsistant||inconsistent
+increas||increase
+incrment||increment
+indendation||indentation
+indended||intended
+independant||independent
+independantly||independently
+independed||independent
+indiate||indicate
+inexpect||inexpected
+infomation||information
+informatiom||information
+informations||information
+informtion||information
+infromation||information
+ingore||ignore
+inital||initial
+initalised||initialized
+initalise||initialize
+initalize||initialize
+initation||initiation
+initators||initiators
+initializiation||initialization
+initialzed||initialized
+initilization||initialization
+initilize||initialize
+inofficial||unofficial
+insititute||institute
+instal||install
+inteface||interface
+integreated||integrated
+integrety||integrity
+integrey||integrity
+intendet||intended
+intented||intended
+interanl||internal
+interchangable||interchangeable
+interferring||interfering
+interger||integer
+intermittant||intermittent
+internel||internal
+interoprability||interoperability
+interrface||interface
+interrrupt||interrupt
+interrup||interrupt
+interrups||interrupts
+interruptted||interrupted
+interupted||interrupted
+interupt||interrupt
+intial||initial
+intialized||initialized
+intialize||initialize
+intregral||integral
+intrrupt||interrupt
+intuative||intuitive
+invaid||invalid
+invalde||invalid
+invalide||invalid
+invididual||individual
+invokation||invocation
+invokations||invocations
+irrelevent||irrelevant
+isnt||isn't
+isssue||issue
+itslef||itself
+jave||java
+jeffies||jiffies
+juse||just
+jus||just
+kown||known
+langage||language
+langauage||language
+langauge||language
+langugage||language
+lauch||launch
+layed||laid
+leightweight||lightweight
+lengh||length
+lenght||length
+lenth||length
+lesstiff||lesstif
+libaries||libraries
+libary||library
+librairies||libraries
+libraris||libraries
+licenceing||licencing
+loggging||logging
+loggin||login
+logile||logfile
+loosing||losing
+losted||lost
+machinary||machinery
+maintainance||maintenance
+maintainence||maintenance
+maintan||maintain
+makeing||making
+malplaced||misplaced
+malplace||misplace
+managable||manageable
+managment||management
+mangement||management
+manoeuvering||maneuvering
+mappping||mapping
+mathimatical||mathematical
+mathimatic||mathematic
+mathimatics||mathematics
+maxium||maximum
+mechamism||mechanism
+meetign||meeting
+ment||meant
+mergable||mergeable
+mesage||message
+messags||messages
+messgaes||messages
+messsage||message
+messsages||messages
+microprocesspr||microprocessor
+milliseonds||milliseconds
+minumum||minimum
+miscelleneous||miscellaneous
+misformed||malformed
+mispelled||misspelled
+mispelt||misspelt
+miximum||maximum
+mmnemonic||mnemonic
+mnay||many
+modeled||modelled
+modulues||modules
+monochorome||monochrome
+monochromo||monochrome
+monocrome||monochrome
+mopdule||module
+mroe||more
+mulitplied||multiplied
+multidimensionnal||multidimensional
+multple||multiple
+mumber||number
+muticast||multicast
+mutiple||multiple
+mutli||multi
+nams||names
+navagating||navigating
+nead||need
+neccecary||necessary
+neccesary||necessary
+neccessary||necessary
+necesary||necessary
+negaive||negative
+negoitation||negotiation
+negotation||negotiation
+nerver||never
+nescessary||necessary
+nessessary||necessary
+noticable||noticeable
+notications||notifications
+notifed||notified
+numebr||number
+numner||number
+obtaion||obtain
+occassionally||occasionally
+occationally||occasionally
+occurance||occurrence
+occurances||occurrences
+occured||occurred
+occurence||occurrence
+occure||occurred
+occuring||occurring
+offet||offset
+omitt||omit
+ommiting||omitting
+ommitted||omitted
+onself||oneself
+ony||only
+operatione||operation
+opertaions||operations
+optionnal||optional
+optmizations||optimizations
+orientatied||orientated
+orientied||oriented
+otherise||otherwise
+ouput||output
+overaall||overall
+overhread||overhead
+overlaping||overlapping
+overriden||overridden
+overun||overrun
+pacakge||package
+pachage||package
+packacge||package
+packege||package
+packge||package
+packtes||packets
+pakage||package
+pallette||palette
+paln||plan
+paramameters||parameters
+paramater||parameter
+parametes||parameters
+parametised||parametrised
+paramter||parameter
+paramters||parameters
+particuarly||particularly
+particularily||particularly
+pased||passed
+passin||passing
+pathes||paths
+pecularities||peculiarities
+peformance||performance
+peice||piece
+pendantic||pedantic
+peprocessor||preprocessor
+perfoming||performing
+permissons||permissions
+peroid||period
+persistance||persistence
+persistant||persistent
+platfrom||platform
+plattform||platform
+pleaes||please
+ploting||plotting
+plugable||pluggable
+poinnter||pointer
+poiter||pointer
+posible||possible
+positon||position
+possibilites||possibilities
+powerfull||powerful
+preceeded||preceded
+preceeding||preceding
+preceed||precede
+precendence||precedence
+precission||precision
+preemptable||preemptible
+prefered||preferred
+prefferably||preferably
+premption||preemption
+prepaired||prepared
+pressre||pressure
+primative||primitive
+princliple||principle
+priorty||priority
+privilaged||privileged
+privilage||privilege
+priviledge||privilege
+priviledges||privileges
+probaly||probably
+procceed||proceed
+proccesors||processors
+procesed||processed
+proces||process
+processessing||processing
+processess||processes
+processpr||processor
+processsed||processed
+processsing||processing
+procteted||protected
+prodecure||procedure
+progams||programs
+progess||progress
+programers||programmers
+programm||program
+programms||programs
+progresss||progress
+promiscous||promiscuous
+promps||prompts
+pronnounced||pronounced
+prononciation||pronunciation
+pronouce||pronounce
+pronunce||pronounce
+propery||property
+propigate||propagate
+propigation||propagation
+propogate||propagate
+prosess||process
+protable||portable
+protcol||protocol
+protecion||protection
+protocoll||protocol
+psudo||pseudo
+psuedo||pseudo
+psychadelic||psychedelic
+pwoer||power
+quering||querying
+raoming||roaming
+reasearcher||researcher
+reasearchers||researchers
+reasearch||research
+recepient||recipient
+receving||receiving
+recieved||received
+recieve||receive
+reciever||receiver
+recieves||receives
+recogniced||recognised
+recognizeable||recognizable
+recommanded||recommended
+recyle||recycle
+redircet||redirect
+redirectrion||redirection
+refcounf||refcount
+refence||reference
+refered||referred
+referenace||reference
+refering||referring
+refernces||references
+refernnce||reference
+refrence||reference
+registerd||registered
+registeresd||registered
+registes||registers
+registraration||registration
+regster||register
+regualar||regular
+reguator||regulator
+regulamentations||regulations
+reigstration||registration
+releated||related
+relevent||relevant
+remoote||remote
+remore||remote
+removeable||removable
+repectively||respectively
+replacable||replaceable
+replacments||replacements
+replys||replies
+reponse||response
+representaion||representation
+reqeust||request
+requiere||require
+requirment||requirement
+requred||required
+requried||required
+requst||request
+reseting||resetting
+resizeable||resizable
+resouces||resources
+resoures||resources
+responce||response
+ressizes||resizes
+ressource||resource
+ressources||resources
+retransmited||retransmitted
+retreived||retrieved
+retreive||retrieve
+retrive||retrieve
+retuned||returned
+reudce||reduce
+reuest||request
+reuqest||request
+reutnred||returned
+rmeoved||removed
+rmeove||remove
+rmeoves||removes
+rountine||routine
+routins||routines
+rquest||request
+runing||running
+runned||ran
+runnning||running
+runtine||runtime
+sacrifying||sacrificing
+safly||safely
+safty||safety
+savable||saveable
+scaned||scanned
+scaning||scanning
+scarch||search
+seach||search
+searchs||searches
+secquence||sequence
+secund||second
+segement||segment
+senarios||scenarios
+sentivite||sensitive
+separatly||separately
+sepcify||specify
+sepc||spec
+seperated||separated
+seperately||separately
+seperate||separate
+seperatly||separately
+seperator||separator
+sepperate||separate
+sequece||sequence
+sequencial||sequential
+serveral||several
+setts||sets
+settting||setting
+shotdown||shutdown
+shoud||should
+shouldnt||shouldn't
+shoule||should
+shrinked||shrunk
+siginificantly||significantly
+signabl||signal
+similary||similarly
+similiar||similar
+simlar||similar
+simliar||similar
+simpified||simplified
+singaled||signaled
+singal||signal
+singed||signed
+sleeped||slept
+softwares||software
+speach||speech
+specfic||specific
+speciefied||specified
+specifc||specific
+specifed||specified
+specificatin||specification
+specificaton||specification
+specifing||specifying
+specifiying||specifying
+speficied||specified
+speicify||specify
+speling||spelling
+spinlcok||spinlock
+spinock||spinlock
+splitted||split
+spreaded||spread
+sructure||structure
+stablilization||stabilization
+staically||statically
+staion||station
+standardss||standards
+standartization||standardization
+standart||standard
+staticly||statically
+stoped||stopped
+stoppped||stopped
+straming||streaming
+struc||struct
+structres||structures
+stuct||struct
+stucture||structure
+sturcture||structure
+subdirectoires||subdirectories
+suble||subtle
+substract||subtract
+succesfully||successfully
+succesful||successful
+successfull||successful
+sucessfully||successfully
+sucess||success
+superflous||superfluous
+superseeded||superseded
+suplied||supplied
+suported||supported
+suport||support
+suppored||supported
+supportin||supporting
+suppoted||supported
+suppported||supported
+suppport||support
+supress||suppress
+surpresses||suppresses
+susbsystem||subsystem
+suspicously||suspiciously
+swaping||swapping
+switchs||switches
+symetric||symmetric
+synax||syntax
+synchonized||synchronized
+syncronize||synchronize
+syncronizing||synchronizing
+syncronus||synchronous
+syste||system
+sytem||system
+sythesis||synthesis
+taht||that
+targetted||targeted
+targetting||targeting
+teh||the
+temorary||temporary
+temproarily||temporarily
+thier||their
+threds||threads
+threshhold||threshold
+throught||through
+thses||these
+tiggered||triggered
+tipically||typically
+tmis||this
+torerable||tolerable
+tramsmitted||transmitted
+tramsmit||transmit
+tranfer||transfer
+transciever||transceiver
+transferd||transferred
+transfered||transferred
+transfering||transferring
+transision||transition
+transmittd||transmitted
+transormed||transformed
+trasmission||transmission
+treshold||threshold
+trigerring||triggering
+trun||turn
+ture||true
+tyep||type
+udpate||update
+uesd||used
+unconditionaly||unconditionally
+underun||underrun
+unecessary||unnecessary
+unexecpted||unexpected
+unexpectd||unexpected
+unexpeted||unexpected
+unfortunatelly||unfortunately
+unifiy||unify
+unintialized||uninitialized
+unknonw||unknown
+unknow||unknown
+unkown||unknown
+unneedingly||unnecessarily
+unresgister||unregister
+unsinged||unsigned
+unstabel||unstable
+unsuccessfull||unsuccessful
+unsuported||unsupported
+untill||until
+unuseful||useless
+upate||update
+usefule||useful
+usefull||useful
+usege||usage
+usera||users
+usualy||usually
+utilites||utilities
+utillities||utilities
+utilties||utilities
+utiltity||utility
+utitity||utility
+utitlty||utility
+vaid||valid
+vaild||valid
+valide||valid
+variantions||variations
+varient||variant
+vaule||value
+verbse||verbose
+verisons||versions
+verison||version
+verson||version
+vicefersa||vice-versa
+virtal||virtual
+virtaul||virtual
+virtiual||virtual
+visiters||visitors
+vitual||virtual
+wating||waiting
+wether||whether
+whataver||whatever
+whcih||which
+whenver||whenever
+wheter||whether
+whe||when
+wierd||weird
+wiil||will
+wirte||write
+withing||within
+wnat||want
+workarould||workaround
+writeing||writing
+writting||writing
+zombe||zombie
+zomebie||zombie
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
new file mode 100644 (file)
index 0000000..a78d9e4
--- /dev/null
@@ -0,0 +1,888 @@
+/*
+ * SD/MMC Greybus driver.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_sdio_host {
+       struct gb_connection    *connection;
+       struct gbphy_device     *gbphy_dev;
+       struct mmc_host         *mmc;
+       struct mmc_request      *mrq;
+       struct mutex            lock;   /* lock for this host */
+       size_t                  data_max;
+       spinlock_t              xfer;   /* lock to cancel ongoing transfer */
+       bool                    xfer_stop;
+       struct workqueue_struct *mrq_workqueue;
+       struct work_struct      mrqwork;
+       u8                      queued_events;
+       bool                    removed;
+       bool                    card_present;
+       bool                    read_only;
+};
+
+
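+/* Greybus response-flag combinations matching the standard MMC_RSP_* types */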
+#define GB_SDIO_RSP_R1_R5_R6_R7        (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+                                GB_SDIO_RSP_OPCODE)
+#define GB_SDIO_RSP_R3_R4      (GB_SDIO_RSP_PRESENT)
+#define GB_SDIO_RSP_R2         (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+                                GB_SDIO_RSP_136)
+#define GB_SDIO_RSP_R1B                (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+                                GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
+
+/* kernel vdd bits start at 0x80; translate them to the greybus ones starting at 0x01 */
+#define GB_SDIO_VDD_SHIFT      8
+
+static inline bool single_op(struct mmc_command *cmd)
+{
+       uint32_t opcode = cmd->opcode;
+
+       return opcode == MMC_WRITE_BLOCK ||
+              opcode == MMC_READ_SINGLE_BLOCK;
+}
+
+static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
+{
+       u32 caps = 0;
+       u32 caps2 = 0;
+
+       caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
+               ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
+               ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
+               ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
+               ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
+               ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
+               ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
+               ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
+               ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
+               ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
+               ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
+               ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
+               ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
+               ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
+               ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
+               ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
+               ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
+
+       caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
+#ifdef MMC_HS400_SUPPORTED
+               ((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
+               ((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
+#endif
+               ((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
+
+       host->mmc->caps = caps;
+       host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;
+
+       if (caps & MMC_CAP_NONREMOVABLE)
+               host->card_present = true;
+}
+
+static u32 _gb_sdio_get_host_ocr(u32 ocr)
+{
+       return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
+               ((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
+               ((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
+               ((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
+               ((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
+               ((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
+               ((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
+               ((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
+               ((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
+               ((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
+               ((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
+               ((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
+               ((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
+               ((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
+               ((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
+               ((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
+               ((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
+               );
+}
+
+static int gb_sdio_get_caps(struct gb_sdio_host *host)
+{
+       struct gb_sdio_get_caps_response response;
+       struct mmc_host *mmc = host->mmc;
+       u16 data_max;
+       u32 blksz;
+       u32 ocr;
+       u32 r;
+       int ret;
+
+       ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
+                               NULL, 0, &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       r = le32_to_cpu(response.caps);
+
+       _gb_sdio_set_host_caps(host, r);
+
+       /* get the max block size that could fit our payload */
+       data_max = gb_operation_get_payload_size_max(host->connection);
+       data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
+                      data_max - sizeof(struct gb_sdio_transfer_response));
+
+       blksz = min(le16_to_cpu(response.max_blk_size), data_max);
+       blksz = max_t(u32, 512, blksz);
+
+       mmc->max_blk_size = rounddown_pow_of_two(blksz);
+       mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
+       host->data_max = data_max;
+
+       /* get ocr supported values */
+       ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
+       mmc->ocr_avail = ocr;
+       mmc->ocr_avail_sdio = mmc->ocr_avail;
+       mmc->ocr_avail_sd = mmc->ocr_avail;
+       mmc->ocr_avail_mmc = mmc->ocr_avail;
+
+       /* get frequency range values */
+       mmc->f_min = le32_to_cpu(response.f_min);
+       mmc->f_max = le32_to_cpu(response.f_max);
+
+       return 0;
+}
+
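+/*
+ * Remember a card event that arrives while the MMC host is not registered
+ * (host->removed is set); only the most recent insert/remove state is kept
+ * and is replayed once probe completes.
+ */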
+static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
+{
+       if (event & GB_SDIO_CARD_INSERTED)
+               host->queued_events &= ~GB_SDIO_CARD_REMOVED;
+       else if (event & GB_SDIO_CARD_REMOVED)
+               host->queued_events &= ~GB_SDIO_CARD_INSERTED;
+
+       host->queued_events |= event;
+}
+
+static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
+{
+       u8 state_changed = 0;
+
+       if (event & GB_SDIO_CARD_INSERTED) {
+               if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+                       return 0;
+               if (host->card_present)
+                       return 0;
+               host->card_present = true;
+               state_changed = 1;
+       }
+
+       if (event & GB_SDIO_CARD_REMOVED) {
+               if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+                       return 0;
+               if (!(host->card_present))
+                       return 0;
+               host->card_present = false;
+               state_changed = 1;
+       }
+
+       if (event & GB_SDIO_WP) {
+               host->read_only = true;
+       }
+
+       if (state_changed) {
+               dev_info(mmc_dev(host->mmc), "card %s now event\n",
+                        (host->card_present ?  "inserted" : "removed"));
+               mmc_detect_change(host->mmc, 0);
+       }
+
+       return 0;
+}
+
+static int gb_sdio_request_handler(struct gb_operation *op)
+{
+       struct gb_sdio_host *host = gb_connection_get_data(op->connection);
+       struct gb_message *request;
+       struct gb_sdio_event_request *payload;
+       u8 type = op->type;
+       int ret = 0;
+       u8 event;
+
+       if (type != GB_SDIO_TYPE_EVENT) {
+               dev_err(mmc_dev(host->mmc),
+                       "unsupported unsolicited event: %u\n", type);
+               return -EINVAL;
+       }
+
+       request = op->request;
+
+       if (request->payload_size < sizeof(*payload)) {
+               dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
+                       request->payload_size, sizeof(*payload));
+               return -EINVAL;
+       }
+
+       payload = request->payload;
+       event = payload->event;
+
+       if (host->removed)
+               _gb_queue_event(host, event);
+       else
+               ret = _gb_sdio_process_events(host, event);
+
+       return ret;
+}
+
+static int gb_sdio_set_ios(struct gb_sdio_host *host,
+                          struct gb_sdio_set_ios_request *request)
+{
+       int ret;
+
+       ret = gbphy_runtime_get_sync(host->gbphy_dev);
+       if (ret)
+               return ret;
+
+       ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
+                               sizeof(*request), NULL, 0);
+
+       gbphy_runtime_put_autosuspend(host->gbphy_dev);
+
+       return ret;
+}
+
+static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
+                        size_t len, u16 nblocks, off_t skip)
+{
+       struct gb_sdio_transfer_request *request;
+       struct gb_sdio_transfer_response *response;
+       struct gb_operation *operation;
+       struct scatterlist *sg = data->sg;
+       unsigned int sg_len = data->sg_len;
+       size_t copied;
+       u16 send_blksz;
+       u16 send_blocks;
+       int ret;
+
+       WARN_ON(len > host->data_max);
+
+       operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+                                       len + sizeof(*request),
+                                       sizeof(*response), GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       request = operation->request->payload;
+       request->data_flags = (data->flags >> 8);
+       request->data_blocks = cpu_to_le16(nblocks);
+       request->data_blksz = cpu_to_le16(data->blksz);
+
+       copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
+
+       if (copied != len) {
+               ret = -EINVAL;
+               goto err_put_operation;
+       }
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret < 0)
+               goto err_put_operation;
+
+       response = operation->response->payload;
+
+       send_blocks = le16_to_cpu(response->data_blocks);
+       send_blksz = le16_to_cpu(response->data_blksz);
+
+       if (len != send_blksz * send_blocks) {
+               dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
+                       len, send_blksz * send_blocks);
+               ret = -EINVAL;
+       }
+
+err_put_operation:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
+                        size_t len, u16 nblocks, off_t skip)
+{
+       struct gb_sdio_transfer_request *request;
+       struct gb_sdio_transfer_response *response;
+       struct gb_operation *operation;
+       struct scatterlist *sg = data->sg;
+       unsigned int sg_len = data->sg_len;
+       size_t copied;
+       u16 recv_blksz;
+       u16 recv_blocks;
+       int ret;
+
+       WARN_ON(len > host->data_max);
+
+       operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+                                       sizeof(*request),
+                                       len + sizeof(*response), GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       request = operation->request->payload;
+       request->data_flags = (data->flags >> 8);
+       request->data_blocks = cpu_to_le16(nblocks);
+       request->data_blksz = cpu_to_le16(data->blksz);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret < 0)
+               goto err_put_operation;
+
+       response = operation->response->payload;
+       recv_blocks = le16_to_cpu(response->data_blocks);
+       recv_blksz = le16_to_cpu(response->data_blksz);
+
+       if (len != recv_blksz * recv_blocks) {
+               dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
+                       recv_blksz * recv_blocks, len);
+               ret = -EINVAL;
+               goto err_put_operation;
+       }
+
+       copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
+                                     skip);
+       if (copied != len)
+               ret = -EINVAL;
+
+err_put_operation:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
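+/*
+ * Split the MMC data request into Greybus transfer operations of at most
+ * data_max bytes (whole blocks only), checking for a pending stop-transmission
+ * request before each chunk.
+ */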
+static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
+{
+       size_t left, len;
+       off_t skip = 0;
+       int ret = 0;
+       u16 nblocks;
+
+       if (single_op(data->mrq->cmd) && data->blocks > 1) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       left = data->blksz * data->blocks;
+
+       while (left) {
+               /* check if a stop transmission is pending */
+               spin_lock(&host->xfer);
+               if (host->xfer_stop) {
+                       host->xfer_stop = false;
+                       spin_unlock(&host->xfer);
+                       ret = -EINTR;
+                       goto out;
+               }
+               spin_unlock(&host->xfer);
+               len = min(left, host->data_max);
+               nblocks = len / data->blksz;
+               len = nblocks * data->blksz;
+
+               if (data->flags & MMC_DATA_READ) {
+                       ret = _gb_sdio_recv(host, data, len, nblocks, skip);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       ret = _gb_sdio_send(host, data, len, nblocks, skip);
+                       if (ret < 0)
+                               goto out;
+               }
+               data->bytes_xfered += len;
+               left -= len;
+               skip += len;
+       }
+
+out:
+       data->error = ret;
+       return ret;
+}
+
+static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
+{
+       struct gb_sdio_command_request request = {0};
+       struct gb_sdio_command_response response;
+       struct mmc_data *data = host->mrq->data;
+       u8 cmd_flags;
+       u8 cmd_type;
+       int i;
+       int ret;
+
+       switch (mmc_resp_type(cmd)) {
+       case MMC_RSP_NONE:
+               cmd_flags = GB_SDIO_RSP_NONE;
+               break;
+       case MMC_RSP_R1:
+               cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
+               break;
+       case MMC_RSP_R1B:
+               cmd_flags = GB_SDIO_RSP_R1B;
+               break;
+       case MMC_RSP_R2:
+               cmd_flags = GB_SDIO_RSP_R2;
+               break;
+       case MMC_RSP_R3:
+               cmd_flags = GB_SDIO_RSP_R3_R4;
+               break;
+       default:
+               dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
+                       mmc_resp_type(cmd));
+               ret = -EINVAL;
+               goto out;
+       }
+
+       switch (mmc_cmd_type(cmd)) {
+       case MMC_CMD_BC:
+               cmd_type = GB_SDIO_CMD_BC;
+               break;
+       case MMC_CMD_BCR:
+               cmd_type = GB_SDIO_CMD_BCR;
+               break;
+       case MMC_CMD_AC:
+               cmd_type = GB_SDIO_CMD_AC;
+               break;
+       case MMC_CMD_ADTC:
+               cmd_type = GB_SDIO_CMD_ADTC;
+               break;
+       default:
+               dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
+                       mmc_cmd_type(cmd));
+               ret = -EINVAL;
+               goto out;
+       }
+
+       request.cmd = cmd->opcode;
+       request.cmd_flags = cmd_flags;
+       request.cmd_type = cmd_type;
+       request.cmd_arg = cpu_to_le32(cmd->arg);
+       /* some controllers need to know the data details at command time */
+       if (data) {
+               request.data_blocks = cpu_to_le16(data->blocks);
+               request.data_blksz = cpu_to_le16(data->blksz);
+       }
+
+       ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
+                               &request, sizeof(request), &response,
+                               sizeof(response));
+       if (ret < 0)
+               goto out;
+
+       /* no response expected */
+       if (cmd_flags & GB_SDIO_RSP_NONE)
+               goto out;
+
+       /* long response expected */
+       if (cmd_flags & GB_SDIO_RSP_R2)
+               for (i = 0; i < 4; i++)
+                       cmd->resp[i] = le32_to_cpu(response.resp[i]);
+       else
+               cmd->resp[0] = le32_to_cpu(response.resp[0]);
+
+out:
+       cmd->error = ret;
+       return ret;
+}
+
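+/*
+ * Work item that runs a queued MMC request in order: the optional
+ * set-block-count command (sbc), the main command, the data transfer and the
+ * optional stop command, then completes the request.
+ */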
+static void gb_sdio_mrq_work(struct work_struct *work)
+{
+       struct gb_sdio_host *host;
+       struct mmc_request *mrq;
+       int ret;
+
+       host = container_of(work, struct gb_sdio_host, mrqwork);
+
+       ret = gbphy_runtime_get_sync(host->gbphy_dev);
+       if (ret)
+               return;
+
+       mutex_lock(&host->lock);
+       mrq = host->mrq;
+       if (!mrq) {
+               mutex_unlock(&host->lock);
+               gbphy_runtime_put_autosuspend(host->gbphy_dev);
+               dev_err(mmc_dev(host->mmc), "mmc request is NULL");
+               return;
+       }
+
+       if (host->removed) {
+               mrq->cmd->error = -ESHUTDOWN;
+               goto done;
+       }
+
+       if (mrq->sbc) {
+               ret = gb_sdio_command(host, mrq->sbc);
+               if (ret < 0)
+                       goto done;
+       }
+
+       ret = gb_sdio_command(host, mrq->cmd);
+       if (ret < 0)
+               goto done;
+
+       if (mrq->data) {
+               ret = gb_sdio_transfer(host, mrq->data);
+               if (ret < 0)
+                       goto done;
+       }
+
+       if (mrq->stop) {
+               ret = gb_sdio_command(host, mrq->stop);
+               if (ret < 0)
+                       goto done;
+       }
+
+done:
+       host->mrq = NULL;
+       mutex_unlock(&host->lock);
+       mmc_request_done(host->mmc, mrq);
+       gbphy_runtime_put_autosuspend(host->gbphy_dev);
+}
+
+static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       struct gb_sdio_host *host = mmc_priv(mmc);
+       struct mmc_command *cmd = mrq->cmd;
+
+       /* Check if this is a request to cancel the ongoing transfer */
+       if (cmd->opcode == MMC_STOP_TRANSMISSION) {
+               spin_lock(&host->xfer);
+               host->xfer_stop = true;
+               spin_unlock(&host->xfer);
+       }
+
+       mutex_lock(&host->lock);
+
+       WARN_ON(host->mrq);
+       host->mrq = mrq;
+
+       if (host->removed) {
+               mrq->cmd->error = -ESHUTDOWN;
+               goto out;
+       }
+       if (!host->card_present) {
+               mrq->cmd->error = -ENOMEDIUM;
+               goto out;
+       }
+
+       queue_work(host->mrq_workqueue, &host->mrqwork);
+
+       mutex_unlock(&host->lock);
+       return;
+
+out:
+       host->mrq = NULL;
+       mutex_unlock(&host->lock);
+       mmc_request_done(mmc, mrq);
+}
+
+static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct gb_sdio_host *host = mmc_priv(mmc);
+       struct gb_sdio_set_ios_request request;
+       int ret;
+       u8 power_mode;
+       u8 bus_width;
+       u8 timing;
+       u8 signal_voltage;
+       u8 drv_type;
+       u32 vdd = 0;
+
+       mutex_lock(&host->lock);
+       request.clock = cpu_to_le32(ios->clock);
+
+       if (ios->vdd)
+               vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
+       request.vdd = cpu_to_le32(vdd);
+
+       request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
+                           GB_SDIO_BUSMODE_OPENDRAIN :
+                           GB_SDIO_BUSMODE_PUSHPULL);
+
+       switch (ios->power_mode) {
+       case MMC_POWER_OFF:
+       default:
+               power_mode = GB_SDIO_POWER_OFF;
+               break;
+       case MMC_POWER_UP:
+               power_mode = GB_SDIO_POWER_UP;
+               break;
+       case MMC_POWER_ON:
+               power_mode = GB_SDIO_POWER_ON;
+               break;
+#ifdef MMC_POWER_UNDEFINED_SUPPORTED
+       case MMC_POWER_UNDEFINED:
+               power_mode = GB_SDIO_POWER_UNDEFINED;
+               break;
+#endif
+       }
+       request.power_mode = power_mode;
+
+       switch (ios->bus_width) {
+       case MMC_BUS_WIDTH_1:
+               bus_width = GB_SDIO_BUS_WIDTH_1;
+               break;
+       case MMC_BUS_WIDTH_4:
+       default:
+               bus_width = GB_SDIO_BUS_WIDTH_4;
+               break;
+       case MMC_BUS_WIDTH_8:
+               bus_width = GB_SDIO_BUS_WIDTH_8;
+               break;
+       }
+       request.bus_width = bus_width;
+
+       switch (ios->timing) {
+       case MMC_TIMING_LEGACY:
+       default:
+               timing = GB_SDIO_TIMING_LEGACY;
+               break;
+       case MMC_TIMING_MMC_HS:
+               timing = GB_SDIO_TIMING_MMC_HS;
+               break;
+       case MMC_TIMING_SD_HS:
+               timing = GB_SDIO_TIMING_SD_HS;
+               break;
+       case MMC_TIMING_UHS_SDR12:
+               timing = GB_SDIO_TIMING_UHS_SDR12;
+               break;
+       case MMC_TIMING_UHS_SDR25:
+               timing = GB_SDIO_TIMING_UHS_SDR25;
+               break;
+       case MMC_TIMING_UHS_SDR50:
+               timing = GB_SDIO_TIMING_UHS_SDR50;
+               break;
+       case MMC_TIMING_UHS_SDR104:
+               timing = GB_SDIO_TIMING_UHS_SDR104;
+               break;
+       case MMC_TIMING_UHS_DDR50:
+               timing = GB_SDIO_TIMING_UHS_DDR50;
+               break;
+#ifdef MMC_DDR52_DEFINED
+       case MMC_TIMING_MMC_DDR52:
+               timing = GB_SDIO_TIMING_MMC_DDR52;
+               break;
+#endif
+       case MMC_TIMING_MMC_HS200:
+               timing = GB_SDIO_TIMING_MMC_HS200;
+               break;
+#ifdef MMC_HS400_SUPPORTED
+       case MMC_TIMING_MMC_HS400:
+               timing = GB_SDIO_TIMING_MMC_HS400;
+               break;
+#endif
+       }
+       request.timing = timing;
+
+       switch (ios->signal_voltage) {
+       case MMC_SIGNAL_VOLTAGE_330:
+               signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
+               break;
+       case MMC_SIGNAL_VOLTAGE_180:
+       default:
+               signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
+               break;
+       case MMC_SIGNAL_VOLTAGE_120:
+               signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
+               break;
+       }
+       request.signal_voltage = signal_voltage;
+
+       switch (ios->drv_type) {
+       case MMC_SET_DRIVER_TYPE_A:
+               drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
+               break;
+       case MMC_SET_DRIVER_TYPE_C:
+               drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
+               break;
+       case MMC_SET_DRIVER_TYPE_D:
+               drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
+               break;
+       case MMC_SET_DRIVER_TYPE_B:
+       default:
+               drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
+               break;
+       }
+       request.drv_type = drv_type;
+
+       ret = gb_sdio_set_ios(host, &request);
+       if (ret < 0)
+               goto out;
+
+       memcpy(&mmc->ios, ios, sizeof(mmc->ios));
+
+out:
+       mutex_unlock(&host->lock);
+}
+
+static int gb_mmc_get_ro(struct mmc_host *mmc)
+{
+       struct gb_sdio_host *host = mmc_priv(mmc);
+
+       mutex_lock(&host->lock);
+       if (host->removed) {
+               mutex_unlock(&host->lock);
+               return -ESHUTDOWN;
+       }
+       mutex_unlock(&host->lock);
+
+       return host->read_only;
+}
+
+static int gb_mmc_get_cd(struct mmc_host *mmc)
+{
+       struct gb_sdio_host *host = mmc_priv(mmc);
+
+       mutex_lock(&host->lock);
+       if (host->removed) {
+               mutex_unlock(&host->lock);
+               return -ESHUTDOWN;
+       }
+       mutex_unlock(&host->lock);
+
+       return host->card_present;
+}
+
+static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       return 0;
+}
+
+static const struct mmc_host_ops gb_sdio_ops = {
+       .request        = gb_mmc_request,
+       .set_ios        = gb_mmc_set_ios,
+       .get_ro         = gb_mmc_get_ro,
+       .get_cd         = gb_mmc_get_cd,
+       .start_signal_voltage_switch    = gb_mmc_switch_voltage,
+};
+
+static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
+                        const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       struct mmc_host *mmc;
+       struct gb_sdio_host *host;
+       int ret = 0;
+
+       mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
+       if (!mmc)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         gb_sdio_request_handler);
+       if (IS_ERR(connection)) {
+               ret = PTR_ERR(connection);
+               goto exit_mmc_free;
+       }
+
+       host = mmc_priv(mmc);
+       host->mmc = mmc;
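+       /* queue incoming card events until mmc_add_host() below has run */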
+       host->removed = true;
+
+       host->connection = connection;
+       gb_connection_set_data(connection, host);
+       host->gbphy_dev = gbphy_dev;
+       gb_gbphy_set_data(gbphy_dev, host);
+
+       ret = gb_connection_enable_tx(connection);
+       if (ret)
+               goto exit_connection_destroy;
+
+       ret = gb_sdio_get_caps(host);
+       if (ret < 0)
+               goto exit_connection_disable;
+
+       mmc->ops = &gb_sdio_ops;
+
+       mmc->max_segs = host->mmc->max_blk_count;
+
+       /* for now, map the maximum request size and segment size 1:1 */
+       mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+       mmc->max_seg_size = mmc->max_req_size;
+
+       mutex_init(&host->lock);
+       spin_lock_init(&host->xfer);
+       host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
+                                             dev_name(&gbphy_dev->dev));
+       if (!host->mrq_workqueue) {
+               ret = -ENOMEM;
+               goto exit_connection_disable;
+       }
+       INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto exit_wq_destroy;
+
+       ret = mmc_add_host(mmc);
+       if (ret < 0)
+               goto exit_wq_destroy;
+       host->removed = false;
+       ret = _gb_sdio_process_events(host, host->queued_events);
+       host->queued_events = 0;
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+
+       return ret;
+
+exit_wq_destroy:
+       destroy_workqueue(host->mrq_workqueue);
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_mmc_free:
+       mmc_free_host(mmc);
+
+       return ret;
+}
+
+static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = host->connection;
+       struct mmc_host *mmc;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       mutex_lock(&host->lock);
+       host->removed = true;
+       mmc = host->mmc;
+       gb_connection_set_data(connection, NULL);
+       mutex_unlock(&host->lock);
+
+       flush_workqueue(host->mrq_workqueue);
+       destroy_workqueue(host->mrq_workqueue);
+       gb_connection_disable_rx(connection);
+       mmc_remove_host(mmc);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+       mmc_free_host(mmc);
+}
+
+static const struct gbphy_device_id gb_sdio_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
+
+static struct gbphy_driver sdio_driver = {
+       .name           = "sdio",
+       .probe          = gb_sdio_probe,
+       .remove         = gb_sdio_remove,
+       .id_table       = gb_sdio_id_table,
+};
+
+module_gbphy_driver(sdio_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
new file mode 100644 (file)
index 0000000..bb76b3c
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * SPI bridge PHY driver.
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/module.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+#include "spilib.h"
+
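+/*
+ * If the SPI core cannot drive runtime PM for us (no SPI_CORE_SUPPORT_PM),
+ * provide prepare/unprepare hooks that take and release a gbphy runtime PM
+ * reference around each transfer.
+ */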
+#ifndef SPI_CORE_SUPPORT_PM
+static int gbphy_spi_prepare_transfer_hardware(struct device *dev)
+{
+       return gbphy_runtime_get_sync(to_gbphy_dev(dev));
+}
+
+static void gbphy_spi_unprepare_transfer_hardware(struct device *dev)
+{
+       gbphy_runtime_put_autosuspend(to_gbphy_dev(dev));
+}
+
+static struct spilib_ops __spilib_ops = {
+       .prepare_transfer_hardware = gbphy_spi_prepare_transfer_hardware,
+       .unprepare_transfer_hardware = gbphy_spi_unprepare_transfer_hardware,
+};
+
+static struct spilib_ops *spilib_ops = &__spilib_ops;
+#else
+static struct spilib_ops *spilib_ops = NULL;
+#endif
+
+static int gb_spi_probe(struct gbphy_device *gbphy_dev,
+                       const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       int ret;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         NULL);
+       if (IS_ERR(connection))
+               return PTR_ERR(connection);
+
+       ret = gb_connection_enable(connection);
+       if (ret)
+               goto exit_connection_destroy;
+
+       ret = gb_spilib_master_init(connection, &gbphy_dev->dev, spilib_ops);
+       if (ret)
+               goto exit_connection_disable;
+
+       gb_gbphy_set_data(gbphy_dev, connection);
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+       return 0;
+
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+
+       return ret;
+}
+
+static void gb_spi_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_connection *connection = gb_gbphy_get_data(gbphy_dev);
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       gb_spilib_master_exit(connection);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+}
+
+static const struct gbphy_device_id gb_spi_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SPI) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_spi_id_table);
+
+static struct gbphy_driver spi_driver = {
+       .name           = "spi",
+       .probe          = gb_spi_probe,
+       .remove         = gb_spi_remove,
+       .id_table       = gb_spi_id_table,
+};
+
+module_gbphy_driver(spi_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
new file mode 100644 (file)
index 0000000..9427c31
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * Greybus SPI library
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include "greybus.h"
+#include "spilib.h"
+
+struct gb_spilib {
+       struct gb_connection    *connection;
+       struct device           *parent;
+       struct spi_transfer     *first_xfer;
+       struct spi_transfer     *last_xfer;
+       struct spilib_ops       *ops;
+       u32                     rx_xfer_offset;
+       u32                     tx_xfer_offset;
+       u32                     last_xfer_size;
+       unsigned int            op_timeout;
+       u16                     mode;
+       u16                     flags;
+       u32                     bits_per_word_mask;
+       u8                      num_chipselect;
+       u32                     min_speed_hz;
+       u32                     max_speed_hz;
+};
+
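+/* Progress markers stored in spi_message->state while a message is processed */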
+#define GB_SPI_STATE_MSG_DONE          ((void *)0)
+#define GB_SPI_STATE_MSG_IDLE          ((void *)1)
+#define GB_SPI_STATE_MSG_RUNNING       ((void *)2)
+#define GB_SPI_STATE_OP_READY          ((void *)3)
+#define GB_SPI_STATE_OP_DONE           ((void *)4)
+#define GB_SPI_STATE_MSG_ERROR         ((void *)-1)
+
+#define XFER_TIMEOUT_TOLERANCE         200
+
+static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
+{
+       return gb_connection_get_data(spi->connection);
+}
+
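+/*
+ * Return 1 if one more gb_spi_transfer descriptor plus tx_size bytes of
+ * payload still fit in a single transfer-request operation, 0 otherwise.
+ */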
+static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
+{
+       size_t headers_size;
+
+       data_max -= sizeof(struct gb_spi_transfer_request);
+       headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+       return tx_size + headers_size > data_max ? 0 : 1;
+}
+
+static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
+                               size_t data_max)
+{
+       size_t rx_xfer_size;
+
+       data_max -= sizeof(struct gb_spi_transfer_response);
+
+       if (rx_size + len > data_max)
+               rx_xfer_size = data_max - rx_size;
+       else
+               rx_xfer_size = len;
+
+       /* if this is a write_read, for symmetry read the same as write */
+       if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
+               rx_xfer_size = *tx_xfer_size;
+       if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
+               *tx_xfer_size = rx_xfer_size;
+
+       return rx_xfer_size;
+}
+
+static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
+                               size_t data_max)
+{
+       size_t headers_size;
+
+       data_max -= sizeof(struct gb_spi_transfer_request);
+       headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+       if (tx_size + headers_size + len > data_max)
+               return data_max - (tx_size + sizeof(struct gb_spi_transfer));
+
+       return len;
+}
+
+static void clean_xfer_state(struct gb_spilib *spi)
+{
+       spi->first_xfer = NULL;
+       spi->last_xfer = NULL;
+       spi->rx_xfer_offset = 0;
+       spi->tx_xfer_offset = 0;
+       spi->last_xfer_size = 0;
+       spi->op_timeout = 0;
+}
+
+static bool is_last_xfer_done(struct gb_spilib *spi)
+{
+       struct spi_transfer *last_xfer = spi->last_xfer;
+
+       if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
+           (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
+               return true;
+
+       return false;
+}
+
+static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
+{
+       struct spi_transfer *last_xfer = spi->last_xfer;
+
+       if (msg->state != GB_SPI_STATE_OP_DONE)
+               return 0;
+
+       /*
+        * if we transferred all content of the last transfer, reset values and
+        * check if this was the last transfer in the message
+        */
+       if (is_last_xfer_done(spi)) {
+               spi->tx_xfer_offset = 0;
+               spi->rx_xfer_offset = 0;
+               spi->op_timeout = 0;
+               if (last_xfer == list_last_entry(&msg->transfers,
+                                                struct spi_transfer,
+                                                transfer_list))
+                       msg->state = GB_SPI_STATE_MSG_DONE;
+               else
+                       spi->first_xfer = list_next_entry(last_xfer,
+                                                         transfer_list);
+               return 0;
+       }
+
+       spi->first_xfer = last_xfer;
+       if (last_xfer->tx_buf)
+               spi->tx_xfer_offset += spi->last_xfer_size;
+
+       if (last_xfer->rx_buf)
+               spi->rx_xfer_offset += spi->last_xfer_size;
+
+       return 0;
+}
+
+static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
+                                         struct spi_message *msg)
+{
+       if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
+                                   transfer_list))
+               return NULL;
+
+       return list_next_entry(xfer, transfer_list);
+}
+
+/* Routines to transfer data */
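+/*
+ * Build one Greybus SPI transfer operation covering as much of the message as
+ * fits in a single operation payload; a larger message is resumed from the
+ * saved tx/rx offsets on subsequent calls.
+ */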
+static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
+               struct gb_connection *connection, struct spi_message *msg)
+{
+       struct gb_spi_transfer_request *request;
+       struct spi_device *dev = msg->spi;
+       struct spi_transfer *xfer;
+       struct gb_spi_transfer *gb_xfer;
+       struct gb_operation *operation;
+       u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
+       u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
+       u32 total_len = 0;
+       unsigned int xfer_timeout;
+       size_t data_max;
+       void *tx_data;
+
+       data_max = gb_operation_get_payload_size_max(connection);
+       xfer = spi->first_xfer;
+
+       /* Find the number of transfers queued and the tx/rx lengths in the message */
+
+       while (msg->state != GB_SPI_STATE_OP_READY) {
+               msg->state = GB_SPI_STATE_MSG_RUNNING;
+               spi->last_xfer = xfer;
+
+               if (!xfer->tx_buf && !xfer->rx_buf) {
+                       dev_err(spi->parent,
+                               "bufferless transfer, length %u\n", xfer->len);
+                       msg->state = GB_SPI_STATE_MSG_ERROR;
+                       return NULL;
+               }
+
+               tx_xfer_size = 0;
+               rx_xfer_size = 0;
+
+               if (xfer->tx_buf) {
+                       len = xfer->len - spi->tx_xfer_offset;
+                       if (!tx_header_fit_operation(tx_size, count, data_max))
+                               break;
+                       tx_xfer_size = calc_tx_xfer_size(tx_size, count,
+                                                        len, data_max);
+                       spi->last_xfer_size = tx_xfer_size;
+               }
+
+               if (xfer->rx_buf) {
+                       len = xfer->len - spi->rx_xfer_offset;
+                       rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
+                                                        len, data_max);
+                       spi->last_xfer_size = rx_xfer_size;
+               }
+
+               tx_size += tx_xfer_size;
+               rx_size += rx_xfer_size;
+
+               total_len += spi->last_xfer_size;
+               count++;
+
+               xfer = get_next_xfer(xfer, msg);
+               if (!xfer || total_len >= data_max)
+                       msg->state = GB_SPI_STATE_OP_READY;
+       }
+
+       /*
+        * In addition to space for all message descriptors we need
+        * to have enough to hold all tx data.
+        */
+       request_size = sizeof(*request);
+       request_size += count * sizeof(*gb_xfer);
+       request_size += tx_size;
+
+       /* Response consists only of incoming data */
+       operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
+                                       request_size, rx_size, GFP_KERNEL);
+       if (!operation)
+               return NULL;
+
+       request = operation->request->payload;
+       request->count = cpu_to_le16(count);
+       request->mode = dev->mode;
+       request->chip_select = dev->chip_select;
+
+       gb_xfer = &request->transfers[0];
+       tx_data = gb_xfer + count;      /* place tx data after last gb_xfer */
+
+       /* Fill in the transfers array */
+       xfer = spi->first_xfer;
+       while (msg->state != GB_SPI_STATE_OP_DONE) {
+               if (xfer == spi->last_xfer)
+                       xfer_len = spi->last_xfer_size;
+               else
+                       xfer_len = xfer->len;
+
+               /* make sure we do not time out on a slow transfer */
+               xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
+               xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
+
+               if (xfer_timeout > spi->op_timeout)
+                       spi->op_timeout = xfer_timeout;
+
+               gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
+               gb_xfer->len = cpu_to_le32(xfer_len);
+               gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
+               gb_xfer->cs_change = xfer->cs_change;
+               gb_xfer->bits_per_word = xfer->bits_per_word;
+
+               /* Copy tx data */
+               if (xfer->tx_buf) {
+                       gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
+                       memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
+                              xfer_len);
+                       tx_data += xfer_len;
+               }
+
+               if (xfer->rx_buf)
+                       gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
+
+               if (xfer == spi->last_xfer) {
+                       if (!is_last_xfer_done(spi))
+                               gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
+                       msg->state = GB_SPI_STATE_OP_DONE;
+                       continue;
+               }
+
+               gb_xfer++;
+               xfer = get_next_xfer(xfer, msg);
+       }
+
+       msg->actual_length += total_len;
+
+       return operation;
+}
+
+static void gb_spi_decode_response(struct gb_spilib *spi,
+                                  struct spi_message *msg,
+                                  struct gb_spi_transfer_response *response)
+{
+       struct spi_transfer *xfer = spi->first_xfer;
+       void *rx_data = response->data;
+       u32 xfer_len;
+
+       while (xfer) {
+               /* Copy rx data */
+               if (xfer->rx_buf) {
+                       if (xfer == spi->first_xfer)
+                               xfer_len = xfer->len - spi->rx_xfer_offset;
+                       else if (xfer == spi->last_xfer)
+                               xfer_len = spi->last_xfer_size;
+                       else
+                               xfer_len = xfer->len;
+
+                       memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
+                              xfer_len);
+                       rx_data += xfer_len;
+               }
+
+               if (xfer == spi->last_xfer)
+                       break;
+
+               xfer = list_next_entry(xfer, transfer_list);
+       }
+}
+
+static int gb_spi_transfer_one_message(struct spi_master *master,
+                                      struct spi_message *msg)
+{
+       struct gb_spilib *spi = spi_master_get_devdata(master);
+       struct gb_connection *connection = spi->connection;
+       struct gb_spi_transfer_response *response;
+       struct gb_operation *operation;
+       int ret = 0;
+
+       spi->first_xfer = list_first_entry_or_null(&msg->transfers,
+                                                  struct spi_transfer,
+                                                  transfer_list);
+       if (!spi->first_xfer) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       msg->state = GB_SPI_STATE_MSG_IDLE;
+
+       while (msg->state != GB_SPI_STATE_MSG_DONE &&
+              msg->state != GB_SPI_STATE_MSG_ERROR) {
+               operation = gb_spi_operation_create(spi, connection, msg);
+               if (!operation) {
+                       msg->state = GB_SPI_STATE_MSG_ERROR;
+                       ret = -EINVAL;
+                       continue;
+               }
+
+               ret = gb_operation_request_send_sync_timeout(operation,
+                                                            spi->op_timeout);
+               if (!ret) {
+                       response = operation->response->payload;
+                       if (response)
+                               gb_spi_decode_response(spi, msg, response);
+               } else {
+                       dev_err(spi->parent,
+                               "transfer operation failed: %d\n", ret);
+                       msg->state = GB_SPI_STATE_MSG_ERROR;
+               }
+
+               gb_operation_put(operation);
+               setup_next_xfer(spi, msg);
+       }
+
+out:
+       msg->status = ret;
+       clean_xfer_state(spi);
+       spi_finalize_current_message(master);
+
+       return ret;
+}
+
+static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+       struct gb_spilib *spi = spi_master_get_devdata(master);
+
+       return spi->ops->prepare_transfer_hardware(spi->parent);
+}
+
+static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+       struct gb_spilib *spi = spi_master_get_devdata(master);
+
+       spi->ops->unprepare_transfer_hardware(spi->parent);
+
+       return 0;
+}
+
+static int gb_spi_setup(struct spi_device *spi)
+{
+       /* Nothing to do for now */
+       return 0;
+}
+
+static void gb_spi_cleanup(struct spi_device *spi)
+{
+       /* Nothing to do for now */
+}
+
+/* Routines to get controller information */
+
+/*
+ * Map Greybus SPI mode bits/flags/bpw into Linux ones.
+ * All bits are the same for now, so these macros return the same values.
+ */
+#define gb_spi_mode_map(mode) mode
+#define gb_spi_flags_map(flags) flags
+
+static int gb_spi_get_master_config(struct gb_spilib *spi)
+{
+       struct gb_spi_master_config_response response;
+       u16 mode, flags;
+       int ret;
+
+       ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
+                               NULL, 0, &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       mode = le16_to_cpu(response.mode);
+       spi->mode = gb_spi_mode_map(mode);
+
+       flags = le16_to_cpu(response.flags);
+       spi->flags = gb_spi_flags_map(flags);
+
+       spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
+       spi->num_chipselect = response.num_chipselect;
+
+       spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
+       spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
+
+       return 0;
+}
+
+static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
+{
+       struct spi_master *master = get_master_from_spi(spi);
+       struct gb_spi_device_config_request request;
+       struct gb_spi_device_config_response response;
+       struct spi_board_info spi_board = { {0} };
+       struct spi_device *spidev;
+       int ret;
+       u8 dev_type;
+
+       request.chip_select = cs;
+
+       ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       dev_type = response.device_type;
+
+       if (dev_type == GB_SPI_SPI_DEV)
+               strlcpy(spi_board.modalias, SPI_DEV_MODALIAS,
+                       sizeof(spi_board.modalias));
+       else if (dev_type == GB_SPI_SPI_NOR)
+               strlcpy(spi_board.modalias, SPI_NOR_MODALIAS,
+                       sizeof(spi_board.modalias));
+       else if (dev_type == GB_SPI_SPI_MODALIAS)
+               memcpy(spi_board.modalias, response.name,
+                      sizeof(spi_board.modalias));
+       else
+               return -EINVAL;
+
+       spi_board.mode          = le16_to_cpu(response.mode);
+       spi_board.bus_num       = master->bus_num;
+       spi_board.chip_select   = cs;
+       spi_board.max_speed_hz  = le32_to_cpu(response.max_speed_hz);
+
+       spidev = spi_new_device(master, &spi_board);
+       if (!spidev)
+               return -EINVAL;
+
+       return 0;
+}
+
+int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
+                         struct spilib_ops *ops)
+{
+       struct gb_spilib *spi;
+       struct spi_master *master;
+       int ret;
+       u8 i;
+
+       /* Allocate master with space for data */
+       master = spi_alloc_master(dev, sizeof(*spi));
+       if (!master) {
+               dev_err(dev, "cannot alloc SPI master\n");
+               return -ENOMEM;
+       }
+
+       spi = spi_master_get_devdata(master);
+       spi->connection = connection;
+       gb_connection_set_data(connection, master);
+       spi->parent = dev;
+       spi->ops = ops;
+
+       /* get master configuration */
+       ret = gb_spi_get_master_config(spi);
+       if (ret)
+               goto exit_spi_put;
+
+       master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
+       master->num_chipselect = spi->num_chipselect;
+       master->mode_bits = spi->mode;
+       master->flags = spi->flags;
+       master->bits_per_word_mask = spi->bits_per_word_mask;
+
+       /* Attach methods */
+       master->cleanup = gb_spi_cleanup;
+       master->setup = gb_spi_setup;
+       master->transfer_one_message = gb_spi_transfer_one_message;
+
+       if (ops && ops->prepare_transfer_hardware) {
+               master->prepare_transfer_hardware =
+                       gb_spi_prepare_transfer_hardware;
+       }
+
+       if (ops && ops->unprepare_transfer_hardware) {
+               master->unprepare_transfer_hardware =
+                       gb_spi_unprepare_transfer_hardware;
+       }
+
+#ifdef SPI_CORE_SUPPORT_PM
+       master->auto_runtime_pm = true;
+#endif
+
+       ret = spi_register_master(master);
+       if (ret < 0)
+               goto exit_spi_put;
+
+       /* now, fetch each device's configuration */
+       for (i = 0; i < spi->num_chipselect; i++) {
+               ret = gb_spi_setup_device(spi, i);
+               if (ret < 0) {
+                       dev_err(dev, "failed to allocate spi device %d: %d\n",
+                               i, ret);
+                       goto exit_spi_unregister;
+               }
+       }
+
+       return 0;
+
+exit_spi_unregister:
+       spi_unregister_master(master);
+exit_spi_put:
+       spi_master_put(master);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_init);
+
+void gb_spilib_master_exit(struct gb_connection *connection)
+{
+       struct spi_master *master = gb_connection_get_data(connection);
+
+       spi_unregister_master(master);
+       spi_master_put(master);
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
new file mode 100644 (file)
index 0000000..566d0dd
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Greybus SPI library header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __SPILIB_H
+#define __SPILIB_H
+
+struct device;
+struct gb_connection;
+
+struct spilib_ops {
+       int (*prepare_transfer_hardware)(struct device *dev);
+       void (*unprepare_transfer_hardware)(struct device *dev);
+};
+
+int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, struct spilib_ops *ops);
+void gb_spilib_master_exit(struct gb_connection *connection);
+
+#endif /* __SPILIB_H */
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
new file mode 100644 (file)
index 0000000..550055e
--- /dev/null
@@ -0,0 +1,1486 @@
+/*
+ * SVC Greybus driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+
+#define SVC_INTF_EJECT_TIMEOUT         9000
+#define SVC_INTF_ACTIVATE_TIMEOUT      6000
+#define SVC_INTF_RESUME_TIMEOUT                3000
+
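+/*
+ * Incoming SVC requests that cannot be completed directly in the request
+ * handler are wrapped in a deferred request and processed from a workqueue.
+ */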
+struct gb_svc_deferred_request {
+       struct work_struct work;
+       struct gb_operation *operation;
+};
+
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation);
+
+static ssize_t endo_id_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "0x%04x\n", svc->endo_id);
+}
+static DEVICE_ATTR_RO(endo_id);
+
+static ssize_t ap_intf_id_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "%u\n", svc->ap_intf_id);
+}
+static DEVICE_ATTR_RO(ap_intf_id);
+
+// FIXME
+// This is a hack; we need to do this "right" and clean the interface up
+// properly, not just forcibly yank the thing out of the system and hope for the
+// best.  But for now, people want their modules to come out without having to
+// throw the thing to the ground or get out a screwdriver.
+static ssize_t intf_eject_store(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+       unsigned short intf_id;
+       int ret;
+
+       ret = kstrtou16(buf, 10, &intf_id);
+       if (ret < 0)
+               return ret;
+
+       dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
+
+       ret = gb_svc_intf_eject(svc, intf_id);
+       if (ret < 0)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_WO(intf_eject);
+
+static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "%s\n",
+                      gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+}
+
+static ssize_t watchdog_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+       int retval;
+       bool user_request;
+
+       retval = strtobool(buf, &user_request);
+       if (retval)
+               return retval;
+
+       if (user_request)
+               retval = gb_svc_watchdog_enable(svc);
+       else
+               retval = gb_svc_watchdog_disable(svc);
+       if (retval)
+               return retval;
+       return len;
+}
+static DEVICE_ATTR_RW(watchdog);
+
+static ssize_t watchdog_action_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
+               return sprintf(buf, "panic\n");
+       else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
+               return sprintf(buf, "reset\n");
+
+       return -EINVAL;
+}
+
+static ssize_t watchdog_action_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (sysfs_streq(buf, "panic"))
+               svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
+       else if (sysfs_streq(buf, "reset"))
+               svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
+       else
+               return -EINVAL;
+
+       return len;
+}
+static DEVICE_ATTR_RW(watchdog_action);
+
+static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
+{
+       struct gb_svc_pwrmon_rail_count_get_response response;
+       int ret;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
+               return ret;
+       }
+
+       *value = response.rail_count;
+
+       return 0;
+}
+
+static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
+               struct gb_svc_pwrmon_rail_names_get_response *response,
+               size_t bufsize)
+{
+       int ret;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
+                               response, bufsize);
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
+               return ret;
+       }
+
+       if (response->status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev,
+                       "SVC error while getting rail names: %u\n",
+                       response->status);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
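+/*
+ * Fetch a single power-monitor sample for the given rail; measurement_type
+ * selects voltage, current or power.
+ */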
+static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
+                                   u8 measurement_type, u32 *value)
+{
+       struct gb_svc_pwrmon_sample_get_request request;
+       struct gb_svc_pwrmon_sample_get_response response;
+       int ret;
+
+       request.rail_id = rail_id;
+       request.measurement_type = measurement_type;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
+               return ret;
+       }
+
+       if (response.result) {
+               dev_err(&svc->dev,
+                       "UniPro error while getting rail power sample (%d %d): %d\n",
+                       rail_id, measurement_type, response.result);
+               switch (response.result) {
+               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+                       return -EINVAL;
+               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+                       return -ENOMSG;
+               default:
+                       return -EREMOTEIO;
+               }
+       }
+
+       *value = le32_to_cpu(response.measurement);
+
+       return 0;
+}
+
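+/*
+ * Like gb_svc_pwrmon_sample_get(), but samples the power rail associated
+ * with a whole interface rather than an individual rail.
+ */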
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+                                 u8 measurement_type, u32 *value)
+{
+       struct gb_svc_pwrmon_intf_sample_get_request request;
+       struct gb_svc_pwrmon_intf_sample_get_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.measurement_type = measurement_type;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
+               return ret;
+       }
+
+       if (response.result) {
+               dev_err(&svc->dev,
+                       "UniPro error while getting intf power sample (%d %d): %d\n",
+                       intf_id, measurement_type, response.result);
+               switch (response.result) {
+               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+                       return -EINVAL;
+               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+                       return -ENOMSG;
+               default:
+                       return -EREMOTEIO;
+               }
+       }
+
+       *value = le32_to_cpu(response.measurement);
+
+       return 0;
+}
+
+static struct attribute *svc_attrs[] = {
+       &dev_attr_endo_id.attr,
+       &dev_attr_ap_intf_id.attr,
+       &dev_attr_intf_eject.attr,
+       &dev_attr_watchdog.attr,
+       &dev_attr_watchdog_action.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(svc);
+
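+/* Assign the given device ID to an interface. */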
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
+{
+       struct gb_svc_intf_device_id_request request;
+
+       request.intf_id = intf_id;
+       request.device_id = device_id;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
+                                &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_eject_request request;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       /*
+        * The pulse width for module release in the SVC is long, so we need
+        * to increase the timeout so the operation will not return too soon.
+        */
+       ret = gb_operation_sync_timeout(svc->connection,
+                                       GB_SVC_TYPE_INTF_EJECT, &request,
+                                       sizeof(request), NULL, 0,
+                                       SVC_INTF_EJECT_TIMEOUT);
+       if (ret) {
+               dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
+               return ret;
+       }
+
+       return 0;
+}
+
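+/* Enable or disable the VSYS power supply for the given interface. */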
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_vsys_request request;
+       struct gb_svc_intf_vsys_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                       &request, sizeof(request),
+                       &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_VSYS_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_refclk_request request;
+       struct gb_svc_intf_refclk_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                       &request, sizeof(request),
+                       &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_REFCLK_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_unipro_request request;
+       struct gb_svc_intf_unipro_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                       &request, sizeof(request),
+                       &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
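+/*
+ * Activate the given interface and report the detected interface type back
+ * through *intf_type.
+ */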
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
+{
+       struct gb_svc_intf_activate_request request;
+       struct gb_svc_intf_activate_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       ret = gb_operation_sync_timeout(svc->connection,
+                       GB_SVC_TYPE_INTF_ACTIVATE,
+                       &request, sizeof(request),
+                       &response, sizeof(response),
+                       SVC_INTF_ACTIVATE_TIMEOUT);
+       if (ret < 0)
+               return ret;
+       if (response.status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev, "failed to activate interface %u: %u\n",
+                               intf_id, response.status);
+               return -EREMOTEIO;
+       }
+
+       *intf_type = response.intf_type;
+
+       return 0;
+}
+
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_resume_request request;
+       struct gb_svc_intf_resume_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       ret = gb_operation_sync_timeout(svc->connection,
+                                       GB_SVC_TYPE_INTF_RESUME,
+                                       &request, sizeof(request),
+                                       &response, sizeof(response),
+                                       SVC_INTF_RESUME_TIMEOUT);
+       if (ret < 0) {
+               dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
+                       intf_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev, "failed to resume interface %u: %u\n",
+                       intf_id, response.status);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
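+/* Read a DME attribute from the peer on the given interface via the SVC. */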
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 *value)
+{
+       struct gb_svc_dme_peer_get_request request;
+       struct gb_svc_dme_peer_get_response response;
+       u16 result;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.attr = cpu_to_le16(attr);
+       request.selector = cpu_to_le16(selector);
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
+                               intf_id, attr, selector, ret);
+               return ret;
+       }
+
+       result = le16_to_cpu(response.result_code);
+       if (result) {
+               dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
+                               intf_id, attr, selector, result);
+               return -EREMOTEIO;
+       }
+
+       if (value)
+               *value = le32_to_cpu(response.attr_value);
+
+       return 0;
+}
+
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 value)
+{
+       struct gb_svc_dme_peer_set_request request;
+       struct gb_svc_dme_peer_set_response response;
+       u16 result;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.attr = cpu_to_le16(attr);
+       request.selector = cpu_to_le16(selector);
+       request.value = cpu_to_le32(value);
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
+                               intf_id, attr, selector, value, ret);
+               return ret;
+       }
+
+       result = le16_to_cpu(response.result_code);
+       if (result) {
+               dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
+                               intf_id, attr, selector, value, result);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
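+/*
+ * Ask the SVC to create a connection between two (interface, CPort) pairs;
+ * all connections currently use traffic class 0.
+ */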
+int gb_svc_connection_create(struct gb_svc *svc,
+                               u8 intf1_id, u16 cport1_id,
+                               u8 intf2_id, u16 cport2_id,
+                               u8 cport_flags)
+{
+       struct gb_svc_conn_create_request request;
+
+       request.intf1_id = intf1_id;
+       request.cport1_id = cpu_to_le16(cport1_id);
+       request.intf2_id = intf2_id;
+       request.cport2_id = cpu_to_le16(cport2_id);
+       request.tc = 0;         /* TC0 */
+       request.flags = cport_flags;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+                              u8 intf2_id, u16 cport2_id)
+{
+       struct gb_svc_conn_destroy_request request;
+       struct gb_connection *connection = svc->connection;
+       int ret;
+
+       request.intf1_id = intf1_id;
+       request.cport1_id = cpu_to_le16(cport1_id);
+       request.intf2_id = intf2_id;
+       request.cport2_id = cpu_to_le16(cport2_id);
+
+       ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
+                               &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
+                               intf1_id, cport1_id, intf2_id, cport2_id, ret);
+       }
+}
+
+int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
+                          u32 strobe_delay, u32 refclk)
+{
+       struct gb_connection *connection = svc->connection;
+       struct gb_svc_timesync_enable_request request;
+
+       request.count = count;
+       request.frame_time = cpu_to_le64(frame_time);
+       request.strobe_delay = cpu_to_le32(strobe_delay);
+       request.refclk = cpu_to_le32(refclk);
+       return gb_operation_sync(connection,
+                                GB_SVC_TYPE_TIMESYNC_ENABLE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_timesync_disable(struct gb_svc *svc)
+{
+       struct gb_connection *connection = svc->connection;
+
+       return gb_operation_sync(connection,
+                                GB_SVC_TYPE_TIMESYNC_DISABLE,
+                                NULL, 0, NULL, 0);
+}
+
+int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
+{
+       struct gb_connection *connection = svc->connection;
+       struct gb_svc_timesync_authoritative_response response;
+       int ret, i;
+
+       ret = gb_operation_sync(connection,
+                               GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+               frame_time[i] = le64_to_cpu(response.frame_time[i]);
+       return 0;
+}
+
+int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
+{
+       struct gb_connection *connection = svc->connection;
+       struct gb_svc_timesync_ping_response response;
+       int ret;
+
+       ret = gb_operation_sync(connection,
+                               GB_SVC_TYPE_TIMESYNC_PING,
+                               NULL, 0,
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       *frame_time = le64_to_cpu(response.frame_time);
+       return 0;
+}
+
+int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
+{
+       struct gb_connection *connection = svc->connection;
+       struct gb_svc_timesync_wake_pins_acquire_request request;
+
+       request.strobe_mask = cpu_to_le32(strobe_mask);
+       return gb_operation_sync(connection,
+                                GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
+                                &request, sizeof(request),
+                                NULL, 0);
+}
+
+int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
+{
+       struct gb_connection *connection = svc->connection;
+
+       return gb_operation_sync(connection,
+                                GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
+                                NULL, 0, NULL, 0);
+}
+
+/* Creates bi-directional routes between the devices */
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+                              u8 intf2_id, u8 dev2_id)
+{
+       struct gb_svc_route_create_request request;
+
+       request.intf1_id = intf1_id;
+       request.dev1_id = dev1_id;
+       request.intf2_id = intf2_id;
+       request.dev2_id = dev2_id;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+/* Destroys bi-directional routes between the devices */
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
+{
+       struct gb_svc_route_destroy_request request;
+       int ret;
+
+       request.intf1_id = intf1_id;
+       request.intf2_id = intf2_id;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
+                               &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
+                               intf1_id, intf2_id, ret);
+       }
+}
+
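+/*
+ * Reconfigure the UniPro power mode of the link to the given interface
+ * (TX/RX mode, gear, number of lanes, amplitude, etc.).
+ */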
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+                              u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+                              u8 tx_amplitude, u8 tx_hs_equalizer,
+                              u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+                              u8 flags, u32 quirks,
+                              struct gb_svc_l2_timer_cfg *local,
+                              struct gb_svc_l2_timer_cfg *remote)
+{
+       struct gb_svc_intf_set_pwrm_request request;
+       struct gb_svc_intf_set_pwrm_response response;
+       int ret;
+       u16 result_code;
+
+       memset(&request, 0, sizeof(request));
+
+       request.intf_id = intf_id;
+       request.hs_series = hs_series;
+       request.tx_mode = tx_mode;
+       request.tx_gear = tx_gear;
+       request.tx_nlanes = tx_nlanes;
+       request.tx_amplitude = tx_amplitude;
+       request.tx_hs_equalizer = tx_hs_equalizer;
+       request.rx_mode = rx_mode;
+       request.rx_gear = rx_gear;
+       request.rx_nlanes = rx_nlanes;
+       request.flags = flags;
+       request.quirks = cpu_to_le32(quirks);
+       if (local)
+               request.local_l2timerdata = *local;
+       if (remote)
+               request.remote_l2timerdata = *remote;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       result_code = response.result_code;
+       if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
+               dev_err(&svc->dev, "set power mode = %d\n", result_code);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
+
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_set_pwrm_request request;
+       struct gb_svc_intf_set_pwrm_response response;
+       int ret;
+       u16 result_code;
+
+       memset(&request, 0, sizeof(request));
+
+       request.intf_id = intf_id;
+       request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
+       request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+       request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0) {
+               dev_err(&svc->dev,
+                       "failed to send set power mode operation to interface %u: %d\n",
+                       intf_id, ret);
+               return ret;
+       }
+
+       result_code = response.result_code;
+       if (result_code != GB_SVC_SETPWRM_PWR_OK) {
+               dev_err(&svc->dev,
+                       "failed to hibernate the link for interface %u: %u\n",
+                       intf_id, result_code);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int gb_svc_ping(struct gb_svc *svc)
+{
+       return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
+                                        NULL, 0, NULL, 0,
+                                        GB_OPERATION_TIMEOUT_DEFAULT * 2);
+}
+
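+/*
+ * Handle an incoming protocol-version request: reject unsupported major
+ * versions, otherwise record the requested version and echo it back.
+ */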
+static int gb_svc_version_request(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_svc_version_request *request;
+       struct gb_svc_version_response *response;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_err(&svc->dev, "short version request (%zu < %zu)\n",
+                               op->request->payload_size,
+                               sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       if (request->major > GB_SVC_VERSION_MAJOR) {
+               dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
+                               request->major, GB_SVC_VERSION_MAJOR);
+               return -ENOTSUPP;
+       }
+
+       svc->protocol_major = request->major;
+       svc->protocol_minor = request->minor;
+
+       if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
+               return -ENOMEM;
+
+       response = op->response->payload;
+       response->major = svc->protocol_major;
+       response->minor = svc->protocol_minor;
+
+       return 0;
+}
+
+static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
+                                       size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_VOL, &value);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "failed to get voltage sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
+                                       size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_CURR, &value);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "failed to get current sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
+                                     size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_PWR, &value);
+       if (ret) {
+               dev_err(&svc->dev, "failed to get power sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static const struct file_operations pwrmon_debugfs_voltage_fops = {
+       .read           = pwr_debugfs_voltage_read,
+};
+
+static const struct file_operations pwrmon_debugfs_current_fops = {
+       .read           = pwr_debugfs_current_read,
+};
+
+static const struct file_operations pwrmon_debugfs_power_fops = {
+       .read           = pwr_debugfs_power_read,
+};
+
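+/*
+ * Create a "pwrmon" debugfs directory with one subdirectory per power rail,
+ * each exposing voltage_now, current_now and power_now files.
+ */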
+static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
+{
+       int i;
+       size_t bufsize;
+       struct dentry *dent;
+       struct gb_svc_pwrmon_rail_names_get_response *rail_names;
+       u8 rail_count;
+
+       dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
+       if (IS_ERR_OR_NULL(dent))
+               return;
+
+       if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
+               goto err_pwrmon_debugfs;
+
+       if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
+               goto err_pwrmon_debugfs;
+
+       bufsize = sizeof(*rail_names) +
+               GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
+
+       rail_names = kzalloc(bufsize, GFP_KERNEL);
+       if (!rail_names)
+               goto err_pwrmon_debugfs;
+
+       svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
+                                   GFP_KERNEL);
+       if (!svc->pwrmon_rails)
+               goto err_pwrmon_debugfs_free;
+
+       if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
+               goto err_pwrmon_debugfs_free;
+
+       for (i = 0; i < rail_count; i++) {
+               struct dentry *dir;
+               struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
+               char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+
+               snprintf(fname, sizeof(fname), "%s",
+                        (char *)&rail_names->name[i]);
+
+               rail->id = i;
+               rail->svc = svc;
+
+               dir = debugfs_create_dir(fname, dent);
+               debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
+                                   &pwrmon_debugfs_voltage_fops);
+               debugfs_create_file("current_now", S_IRUGO, dir, rail,
+                                   &pwrmon_debugfs_current_fops);
+               debugfs_create_file("power_now", S_IRUGO, dir, rail,
+                                   &pwrmon_debugfs_power_fops);
+       }
+
+       kfree(rail_names);
+       return;
+
+err_pwrmon_debugfs_free:
+       kfree(rail_names);
+       kfree(svc->pwrmon_rails);
+       svc->pwrmon_rails = NULL;
+
+err_pwrmon_debugfs:
+       debugfs_remove(dent);
+}
+
+static void gb_svc_debugfs_init(struct gb_svc *svc)
+{
+       svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
+                                                gb_debugfs_get());
+       gb_svc_pwrmon_debugfs_init(svc);
+}
+
+static void gb_svc_debugfs_exit(struct gb_svc *svc)
+{
+       debugfs_remove_recursive(svc->debugfs_dentry);
+       kfree(svc->pwrmon_rails);
+       svc->pwrmon_rails = NULL;
+}
+
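+/*
+ * Handle the SVC hello request: record the endo and AP interface IDs,
+ * register the SVC device and set up the watchdog, debugfs and timesync
+ * before deferring further processing to the workqueue.
+ */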
+static int gb_svc_hello(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_svc_hello_request *hello_request;
+       int ret;
+
+       if (op->request->payload_size < sizeof(*hello_request)) {
+               dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
+                               op->request->payload_size,
+                               sizeof(*hello_request));
+               return -EINVAL;
+       }
+
+       hello_request = op->request->payload;
+       svc->endo_id = le16_to_cpu(hello_request->endo_id);
+       svc->ap_intf_id = hello_request->interface_id;
+
+       ret = device_add(&svc->dev);
+       if (ret) {
+               dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
+               return ret;
+       }
+
+       ret = gb_svc_watchdog_create(svc);
+       if (ret) {
+               dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
+               goto err_unregister_device;
+       }
+
+       gb_svc_debugfs_init(svc);
+
+       ret = gb_timesync_svc_add(svc);
+       if (ret) {
+               dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
+               gb_svc_debugfs_exit(svc);
+               goto err_unregister_device;
+       }
+
+       return gb_svc_queue_deferred_request(op);
+
+err_unregister_device:
+       gb_svc_watchdog_destroy(svc);
+       device_del(&svc->dev);
+       return ret;
+}
+
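+/*
+ * Interface IDs within a module are consecutive, starting at the module ID
+ * (its primary interface), which lets us map an interface ID back to its
+ * module and interface slot.
+ */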
+static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
+                                                       u8 intf_id)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+       size_t num_interfaces;
+       u8 module_id;
+
+       list_for_each_entry(module, &hd->modules, hd_node) {
+               module_id = module->module_id;
+               num_interfaces = module->num_interfaces;
+
+               if (intf_id >= module_id &&
+                               intf_id < module_id + num_interfaces) {
+                       return module->interfaces[intf_id - module_id];
+               }
+       }
+
+       return NULL;
+}
+
+static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+
+       list_for_each_entry(module, &hd->modules, hd_node) {
+               if (module->module_id == module_id)
+                       return module;
+       }
+
+       return NULL;
+}
+
+static void gb_svc_process_hello_deferred(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       int ret;
+
+       /*
+        * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
+        * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
+        * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
+        * module.
+        *
+        * This code should be removed once SW-2217 (Heuristic for UniPro
+        * Power Mode Changes) is resolved.
+        */
+       ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
+                                       GB_SVC_UNIPRO_HS_SERIES_A,
+                                       GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                       2, 1,
+                                       GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS,
+                                       GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                       2, 1,
+                                       0, 0,
+                                       NULL, NULL);
+
+       if (ret)
+               dev_warn(&svc->dev,
+                       "power mode change failed on AP to switch link: %d\n",
+                       ret);
+}
+
+static void gb_svc_process_module_inserted(struct gb_operation *operation)
+{
+       struct gb_svc_module_inserted_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+       size_t num_interfaces;
+       u8 module_id;
+       u16 flags;
+       int ret;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       module_id = request->primary_intf_id;
+       num_interfaces = request->intf_count;
+       flags = le16_to_cpu(request->flags);
+
+       dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
+                       __func__, module_id, num_interfaces, flags);
+
+       if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
+               dev_warn(&svc->dev, "no primary interface detected on module %u\n",
+                               module_id);
+       }
+
+       module = gb_svc_module_lookup(svc, module_id);
+       if (module) {
+               dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
+                               module_id);
+               return;
+       }
+
+       module = gb_module_create(hd, module_id, num_interfaces);
+       if (!module) {
+               dev_err(&svc->dev, "failed to create module\n");
+               return;
+       }
+
+       ret = gb_module_add(module);
+       if (ret) {
+               gb_module_put(module);
+               return;
+       }
+
+       list_add(&module->hd_node, &hd->modules);
+}
+
+static void gb_svc_process_module_removed(struct gb_operation *operation)
+{
+       struct gb_svc_module_removed_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_module *module;
+       u8 module_id;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       module_id = request->primary_intf_id;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
+
+       module = gb_svc_module_lookup(svc, module_id);
+       if (!module) {
+               dev_warn(&svc->dev, "unexpected module-removed event %u\n",
+                               module_id);
+               return;
+       }
+
+       module->disconnected = true;
+
+       gb_module_del(module);
+       list_del(&module->hd_node);
+       gb_module_put(module);
+}
+
+static void gb_svc_process_intf_oops(struct gb_operation *operation)
+{
+       struct gb_svc_intf_oops_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_interface *intf;
+       u8 intf_id;
+       u8 reason;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       intf_id = request->intf_id;
+       reason = request->reason;
+
+       intf = gb_svc_interface_lookup(svc, intf_id);
+       if (!intf) {
+               dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
+                        intf_id);
+               return;
+       }
+
+       dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
+                intf_id, reason);
+
+       mutex_lock(&intf->mutex);
+       intf->disconnected = true;
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
+{
+       struct gb_svc_intf_mailbox_event_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_interface *intf;
+       u8 intf_id;
+       u16 result_code;
+       u32 mailbox;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       intf_id = request->intf_id;
+       result_code = le16_to_cpu(request->result_code);
+       mailbox = le32_to_cpu(request->mailbox);
+
+       dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
+                       __func__, intf_id, result_code, mailbox);
+
+       intf = gb_svc_interface_lookup(svc, intf_id);
+       if (!intf) {
+               dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
+               return;
+       }
+
+       gb_interface_mailbox_event(intf, result_code, mailbox);
+}
+
+static void gb_svc_process_deferred_request(struct work_struct *work)
+{
+       struct gb_svc_deferred_request *dr;
+       struct gb_operation *operation;
+       struct gb_svc *svc;
+       u8 type;
+
+       dr = container_of(work, struct gb_svc_deferred_request, work);
+       operation = dr->operation;
+       svc = gb_connection_get_data(operation->connection);
+       type = operation->request->header->type;
+
+       switch (type) {
+       case GB_SVC_TYPE_SVC_HELLO:
+               gb_svc_process_hello_deferred(operation);
+               break;
+       case GB_SVC_TYPE_MODULE_INSERTED:
+               gb_svc_process_module_inserted(operation);
+               break;
+       case GB_SVC_TYPE_MODULE_REMOVED:
+               gb_svc_process_module_removed(operation);
+               break;
+       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+               gb_svc_process_intf_mailbox_event(operation);
+               break;
+       case GB_SVC_TYPE_INTF_OOPS:
+               gb_svc_process_intf_oops(operation);
+               break;
+       default:
+               dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
+       }
+
+       gb_operation_put(operation);
+       kfree(dr);
+}
+
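+/*
+ * Take a reference on the operation and queue it on the SVC workqueue for
+ * deferred processing.
+ */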
+static int gb_svc_queue_deferred_request(struct gb_operation *operation)
+{
+       struct gb_svc *svc = gb_connection_get_data(operation->connection);
+       struct gb_svc_deferred_request *dr;
+
+       dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       gb_operation_get(operation);
+
+       dr->operation = operation;
+       INIT_WORK(&dr->work, gb_svc_process_deferred_request);
+
+       queue_work(svc->wq, &dr->work);
+
+       return 0;
+}
+
+static int gb_svc_intf_reset_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_message *request = op->request;
+       struct gb_svc_intf_reset_request *reset;
+       u8 intf_id;
+
+       if (request->payload_size < sizeof(*reset)) {
+               dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
+                               request->payload_size, sizeof(*reset));
+               return -EINVAL;
+       }
+       reset = request->payload;
+
+       intf_id = reset->intf_id;
+
+       /* FIXME Reset the interface here */
+
+       return 0;
+}
+
+static int gb_svc_module_inserted_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_module_inserted_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
+                               op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+                       request->primary_intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_module_removed_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_module_removed_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
+                               op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+                       request->primary_intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_oops_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_intf_oops_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
+                        op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_intf_mailbox_event_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
+                               op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       u8 type = op->type;
+       int ret = 0;
+
+       /*
+        * SVC requests need to follow a specific order (at least initially),
+        * and the code below enforces that. The expected order is:
+        * - PROTOCOL_VERSION
+        * - SVC_HELLO
+        * - Any other request, except the earlier two.
+        *
+        * Incoming requests are guaranteed to be serialized, so we don't need
+        * to protect 'state' against races.
+        */
+       switch (type) {
+       case GB_SVC_TYPE_PROTOCOL_VERSION:
+               if (svc->state != GB_SVC_STATE_RESET)
+                       ret = -EINVAL;
+               break;
+       case GB_SVC_TYPE_SVC_HELLO:
+               if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
+                       ret = -EINVAL;
+               break;
+       default:
+               if (svc->state != GB_SVC_STATE_SVC_HELLO)
+                       ret = -EINVAL;
+               break;
+       }
+
+       if (ret) {
+               dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
+                               type, svc->state);
+               return ret;
+       }
+
+       switch (type) {
+       case GB_SVC_TYPE_PROTOCOL_VERSION:
+               ret = gb_svc_version_request(op);
+               if (!ret)
+                       svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
+               return ret;
+       case GB_SVC_TYPE_SVC_HELLO:
+               ret = gb_svc_hello(op);
+               if (!ret)
+                       svc->state = GB_SVC_STATE_SVC_HELLO;
+               return ret;
+       case GB_SVC_TYPE_INTF_RESET:
+               return gb_svc_intf_reset_recv(op);
+       case GB_SVC_TYPE_MODULE_INSERTED:
+               return gb_svc_module_inserted_recv(op);
+       case GB_SVC_TYPE_MODULE_REMOVED:
+               return gb_svc_module_removed_recv(op);
+       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+               return gb_svc_intf_mailbox_event_recv(op);
+       case GB_SVC_TYPE_INTF_OOPS:
+               return gb_svc_intf_oops_recv(op);
+       default:
+               dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
+               return -EINVAL;
+       }
+}
+
+static void gb_svc_release(struct device *dev)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (svc->connection)
+               gb_connection_destroy(svc->connection);
+       ida_destroy(&svc->device_id_map);
+       destroy_workqueue(svc->wq);
+       kfree(svc);
+}
+
+struct device_type greybus_svc_type = {
+       .name           = "greybus_svc",
+       .release        = gb_svc_release,
+};
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd)
+{
+       struct gb_svc *svc;
+
+       svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+       if (!svc)
+               return NULL;
+
+       svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
+       if (!svc->wq) {
+               kfree(svc);
+               return NULL;
+       }
+
+       svc->dev.parent = &hd->dev;
+       svc->dev.bus = &greybus_bus_type;
+       svc->dev.type = &greybus_svc_type;
+       svc->dev.groups = svc_groups;
+       svc->dev.dma_mask = svc->dev.parent->dma_mask;
+       device_initialize(&svc->dev);
+
+       dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
+
+       ida_init(&svc->device_id_map);
+       svc->state = GB_SVC_STATE_RESET;
+       svc->hd = hd;
+
+       svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
+                                               gb_svc_request_handler);
+       if (IS_ERR(svc->connection)) {
+               dev_err(&svc->dev, "failed to create connection: %ld\n",
+                               PTR_ERR(svc->connection));
+               goto err_put_device;
+       }
+
+       gb_connection_set_data(svc->connection, svc);
+
+       return svc;
+
+err_put_device:
+       put_device(&svc->dev);
+       return NULL;
+}
+
+int gb_svc_add(struct gb_svc *svc)
+{
+       int ret;
+
+       /*
+        * The SVC protocol is currently driven by the SVC, so the SVC device
+        * is added from the connection request handler when enough
+        * information has been received.
+        */
+       ret = gb_connection_enable(svc->connection);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void gb_svc_remove_modules(struct gb_svc *svc)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module, *tmp;
+
+       list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
+               gb_module_del(module);
+               list_del(&module->hd_node);
+               gb_module_put(module);
+       }
+}
+
+void gb_svc_del(struct gb_svc *svc)
+{
+       gb_connection_disable_rx(svc->connection);
+
+       /*
+        * The SVC device may have been registered from the request handler.
+        */
+       if (device_is_registered(&svc->dev)) {
+               gb_timesync_svc_remove(svc);
+               gb_svc_debugfs_exit(svc);
+               gb_svc_watchdog_destroy(svc);
+               device_del(&svc->dev);
+       }
+
+       flush_workqueue(svc->wq);
+
+       gb_svc_remove_modules(svc);
+
+       gb_connection_disable(svc->connection);
+}
+
+void gb_svc_put(struct gb_svc *svc)
+{
+       put_device(&svc->dev);
+}
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
new file mode 100644 (file)
index 0000000..d1d7ef9
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Greybus SVC code
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __SVC_H
+#define __SVC_H
+
+#define GB_SVC_CPORT_FLAG_E2EFC                BIT(0)
+#define GB_SVC_CPORT_FLAG_CSD_N                BIT(1)
+#define GB_SVC_CPORT_FLAG_CSV_N                BIT(2)
+
+enum gb_svc_state {
+       GB_SVC_STATE_RESET,
+       GB_SVC_STATE_PROTOCOL_VERSION,
+       GB_SVC_STATE_SVC_HELLO,
+};
+
+enum gb_svc_watchdog_bite {
+       GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
+       GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
+};
+
+struct gb_svc_watchdog;
+
+struct svc_debugfs_pwrmon_rail {
+       u8 id;
+       struct gb_svc *svc;
+};
+
+struct gb_svc {
+       struct device           dev;
+
+       struct gb_host_device   *hd;
+       struct gb_connection    *connection;
+       enum gb_svc_state       state;
+       struct ida              device_id_map;
+       struct workqueue_struct *wq;
+
+       u16 endo_id;
+       u8 ap_intf_id;
+
+       u8 protocol_major;
+       u8 protocol_minor;
+
+       struct gb_svc_watchdog  *watchdog;
+       enum gb_svc_watchdog_bite action;
+
+       struct dentry *debugfs_dentry;
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails;
+};
+#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd);
+int gb_svc_add(struct gb_svc *svc);
+void gb_svc_del(struct gb_svc *svc);
+void gb_svc_put(struct gb_svc *svc);
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+                                 u8 measurement_type, u32 *value);
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+                              u8 intf2_id, u8 dev2_id);
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
+int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+                            u8 intf2_id, u16 cport2_id, u8 cport_flags);
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+                              u8 intf2_id, u16 cport2_id);
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 *value);
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 value);
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+                              u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+                              u8 tx_amplitude, u8 tx_hs_equalizer,
+                              u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+                              u8 flags, u32 quirks,
+                              struct gb_svc_l2_timer_cfg *local,
+                              struct gb_svc_l2_timer_cfg *remote);
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
+int gb_svc_ping(struct gb_svc *svc);
+int gb_svc_watchdog_create(struct gb_svc *svc);
+void gb_svc_watchdog_destroy(struct gb_svc *svc);
+bool gb_svc_watchdog_enabled(struct gb_svc *svc);
+int gb_svc_watchdog_enable(struct gb_svc *svc);
+int gb_svc_watchdog_disable(struct gb_svc *svc);
+int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
+                          u32 strobe_delay, u32 refclk);
+int gb_svc_timesync_disable(struct gb_svc *svc);
+int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time);
+int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time);
+int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask);
+int gb_svc_timesync_wake_pins_release(struct gb_svc *svc);
+
+int gb_svc_protocol_init(void);
+void gb_svc_protocol_exit(void);
+
+#endif /* __SVC_H */
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
new file mode 100644 (file)
index 0000000..3729460
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * SVC Greybus "watchdog" driver.
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include "greybus.h"
+
+#define SVC_WATCHDOG_PERIOD    (2*HZ)
+
+struct gb_svc_watchdog {
+       struct delayed_work     work;
+       struct gb_svc           *svc;
+       bool                    enabled;
+       struct notifier_block pm_notifier;
+};
+
+static struct delayed_work reset_work;
+
+static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
+                                   unsigned long pm_event, void *unused)
+{
+       struct gb_svc_watchdog *watchdog =
+               container_of(notifier, struct gb_svc_watchdog, pm_notifier);
+
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               gb_svc_watchdog_disable(watchdog->svc);
+               break;
+       case PM_POST_SUSPEND:
+               gb_svc_watchdog_enable(watchdog->svc);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static void greybus_reset(struct work_struct *work)
+{
+       static char start_path[256] = "/system/bin/start";
+       static char *envp[] = {
+               "HOME=/",
+               "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
+               NULL,
+       };
+       static char *argv[] = {
+               start_path,
+               "unipro_reset",
+               NULL,
+       };
+
+       printk(KERN_ERR "svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
+              argv[0], argv[1]);
+       call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
+}
+
+static void do_work(struct work_struct *work)
+{
+       struct gb_svc_watchdog *watchdog;
+       struct gb_svc *svc;
+       int retval;
+
+       watchdog = container_of(work, struct gb_svc_watchdog, work.work);
+       svc = watchdog->svc;
+
+       dev_dbg(&svc->dev, "%s: ping.\n", __func__);
+       retval = gb_svc_ping(svc);
+       if (retval) {
+               /*
+                * Something went really wrong; warn userspace and then pull
+                * the plug and reset the whole greybus network.
+                * We need to do this outside of this workqueue as we will be
+                * tearing down the svc device itself, so queue up yet another
+                * callback to do that.
+                */
+               dev_err(&svc->dev,
+                       "SVC ping has returned %d, something is wrong!!!\n",
+                       retval);
+
+               if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
+                       panic("SVC is not responding\n");
+               } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
+                       dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
+
+                       INIT_DELAYED_WORK(&reset_work, greybus_reset);
+                       schedule_delayed_work(&reset_work, HZ / 2);
+
+                       /*
+                        * Disable ourselves; we don't want to trip again unless
+                        * userspace wants us to.
+                        */
+                       watchdog->enabled = false;
+               }
+       }
+
+       /* resubmit our work to happen again, if we are still "alive" */
+       if (watchdog->enabled)
+               schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+}
+
+int gb_svc_watchdog_create(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+       int retval;
+
+       if (svc->watchdog)
+               return 0;
+
+       watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
+       if (!watchdog)
+               return -ENOMEM;
+
+       watchdog->enabled = false;
+       watchdog->svc = svc;
+       INIT_DELAYED_WORK(&watchdog->work, do_work);
+       svc->watchdog = watchdog;
+
+       watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
+       retval = register_pm_notifier(&watchdog->pm_notifier);
+       if (retval) {
+               dev_err(&svc->dev, "error registering pm notifier(%d)\n",
+                       retval);
+               goto svc_watchdog_create_err;
+       }
+
+       retval = gb_svc_watchdog_enable(svc);
+       if (retval) {
+               dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
+               unregister_pm_notifier(&watchdog->pm_notifier);
+               goto svc_watchdog_create_err;
+       }
+       return retval;
+
+svc_watchdog_create_err:
+       svc->watchdog = NULL;
+       kfree(watchdog);
+
+       return retval;
+}
+
+void gb_svc_watchdog_destroy(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog = svc->watchdog;
+
+       if (!watchdog)
+               return;
+
+       unregister_pm_notifier(&watchdog->pm_notifier);
+       gb_svc_watchdog_disable(svc);
+       svc->watchdog = NULL;
+       kfree(watchdog);
+}
+
+bool gb_svc_watchdog_enabled(struct gb_svc *svc)
+{
+       if (!svc || !svc->watchdog)
+               return false;
+       return svc->watchdog->enabled;
+}
+
+int gb_svc_watchdog_enable(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+
+       if (!svc->watchdog)
+               return -ENODEV;
+
+       watchdog = svc->watchdog;
+       if (watchdog->enabled)
+               return 0;
+
+       watchdog->enabled = true;
+       schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+       return 0;
+}
+
+int gb_svc_watchdog_disable(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+
+       if (!svc->watchdog)
+               return -ENODEV;
+
+       watchdog = svc->watchdog;
+       if (!watchdog->enabled)
+               return 0;
+
+       watchdog->enabled = false;
+       cancel_delayed_work_sync(&watchdog->work);
+       return 0;
+}
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
new file mode 100644 (file)
index 0000000..2e68af7
--- /dev/null
@@ -0,0 +1,1357 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include "greybus.h"
+#include "timesync.h"
+#include "greybus_trace.h"
+
+/*
+ * The minimum inter-strobe value of one millisecond is chosen because it
+ * just about fits the common definition of a jiffy.
+ *
+ * The maximum value, on the other hand, is constrained by the number of bits
+ * the SVC can fit into a 16-bit up-counter. The SVC configures the timer in
+ * microseconds, so the maximum allowable value is 65535 microseconds. We clip
+ * that to 10000 microseconds for the sake of using nice round base-10
+ * numbers, and since right now there is no imaginable use case requiring
+ * anything other than a one millisecond inter-strobe time, let alone
+ * something higher than ten milliseconds.
+ */
+#define GB_TIMESYNC_STROBE_DELAY_US            1000
+#define GB_TIMESYNC_DEFAULT_OFFSET_US          1000
+
+/* Work queue timers long, short and SVC strobe timeout */
+#define GB_TIMESYNC_DELAYED_WORK_LONG          msecs_to_jiffies(10)
+#define GB_TIMESYNC_DELAYED_WORK_SHORT         msecs_to_jiffies(1)
+#define GB_TIMESYNC_MAX_WAIT_SVC               msecs_to_jiffies(5000)
+#define GB_TIMESYNC_KTIME_UPDATE               msecs_to_jiffies(1000)
+#define GB_TIMESYNC_MAX_KTIME_CONVERSION       15
+
+/* Maximum number of times we'll retry a failed synchronous sync */
+#define GB_TIMESYNC_MAX_RETRIES                        5
+
+/* Reported nanoseconds/femtoseconds per clock */
+static u64 gb_timesync_ns_per_clock;
+static u64 gb_timesync_fs_per_clock;
+
+/* Maximum difference we will accept converting FrameTime to ktime */
+static u32 gb_timesync_max_ktime_diff;
+
+/* Reported clock rate */
+static unsigned long gb_timesync_clock_rate;
+
+/* Workqueue */
+static void gb_timesync_worker(struct work_struct *work);
+
+/* List of SVCs with one FrameTime per SVC */
+static LIST_HEAD(gb_timesync_svc_list);
+
+/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
+static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
+
+/* Structure to convert from FrameTime to timespec/ktime */
+struct gb_timesync_frame_time_data {
+       u64 frame_time;
+       struct timespec ts;
+};
+
+struct gb_timesync_svc {
+       struct list_head list;
+       struct list_head interface_list;
+       struct gb_svc *svc;
+       struct gb_timesync_host_device *timesync_hd;
+
+       spinlock_t spinlock;    /* Per SVC spinlock to sync with ISR */
+       struct mutex mutex;     /* Per SVC mutex for regular synchronization */
+
+       struct dentry *frame_time_dentry;
+       struct dentry *frame_ktime_dentry;
+       struct workqueue_struct *work_queue;
+       wait_queue_head_t wait_queue;
+       struct delayed_work delayed_work;
+       struct timer_list ktime_timer;
+
+       /* The current local FrameTime */
+       u64 frame_time_offset;
+       struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
+       struct gb_timesync_frame_time_data ktime_data;
+
+       /* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
+       u64 svc_ping_frame_time;
+       u64 ap_ping_frame_time;
+
+       /* Transitory settings */
+       u32 strobe_mask;
+       bool offset_down;
+       bool print_ping;
+       bool capture_ping;
+       int strobe;
+
+       /* Current state */
+       int state;
+};
+
+struct gb_timesync_host_device {
+       struct list_head list;
+       struct gb_host_device *hd;
+       u64 ping_frame_time;
+};
+
+struct gb_timesync_interface {
+       struct list_head list;
+       struct gb_interface *interface;
+       u64 ping_frame_time;
+};
+
+enum gb_timesync_state {
+       GB_TIMESYNC_STATE_INVALID               = 0,
+       GB_TIMESYNC_STATE_INACTIVE              = 1,
+       GB_TIMESYNC_STATE_INIT                  = 2,
+       GB_TIMESYNC_STATE_WAIT_SVC              = 3,
+       GB_TIMESYNC_STATE_AUTHORITATIVE         = 4,
+       GB_TIMESYNC_STATE_PING                  = 5,
+       GB_TIMESYNC_STATE_ACTIVE                = 6,
+};
+
+static void gb_timesync_ktime_timer_fn(unsigned long data);
+
+static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
+                                   u64 counts)
+{
+       if (timesync_svc->offset_down)
+               return counts - timesync_svc->frame_time_offset;
+       else
+               return counts + timesync_svc->frame_time_offset;
+}
+
+/*
+ * This function provides the authoritative FrameTime to a calling function. It
+ * is designed to be lockless and should remain that way; the caller is
+ * assumed to be state-aware.
+ */
+static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
+{
+       u64 clocks = gb_timesync_platform_get_counter();
+
+       return gb_timesync_adjust_count(timesync_svc, clocks);
+}
+
+static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
+                                            *timesync_svc)
+{
+       queue_delayed_work(timesync_svc->work_queue,
+                          &timesync_svc->delayed_work,
+                          GB_TIMESYNC_MAX_WAIT_SVC);
+}
+
+static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
+                                 int state)
+{
+       switch (state) {
+       case GB_TIMESYNC_STATE_INVALID:
+               timesync_svc->state = state;
+               wake_up(&timesync_svc->wait_queue);
+               break;
+       case GB_TIMESYNC_STATE_INACTIVE:
+               timesync_svc->state = state;
+               wake_up(&timesync_svc->wait_queue);
+               break;
+       case GB_TIMESYNC_STATE_INIT:
+               if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
+                       timesync_svc->strobe = 0;
+                       timesync_svc->frame_time_offset = 0;
+                       timesync_svc->state = state;
+                       cancel_delayed_work(&timesync_svc->delayed_work);
+                       queue_delayed_work(timesync_svc->work_queue,
+                                          &timesync_svc->delayed_work,
+                                          GB_TIMESYNC_DELAYED_WORK_LONG);
+               }
+               break;
+       case GB_TIMESYNC_STATE_WAIT_SVC:
+               if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
+                       timesync_svc->state = state;
+               break;
+       case GB_TIMESYNC_STATE_AUTHORITATIVE:
+               if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
+                       timesync_svc->state = state;
+                       cancel_delayed_work(&timesync_svc->delayed_work);
+                       queue_delayed_work(timesync_svc->work_queue,
+                                          &timesync_svc->delayed_work, 0);
+               }
+               break;
+       case GB_TIMESYNC_STATE_PING:
+               if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
+                       timesync_svc->state = state;
+                       queue_delayed_work(timesync_svc->work_queue,
+                                          &timesync_svc->delayed_work,
+                                          GB_TIMESYNC_DELAYED_WORK_SHORT);
+               }
+               break;
+       case GB_TIMESYNC_STATE_ACTIVE:
+               if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
+                   timesync_svc->state == GB_TIMESYNC_STATE_PING) {
+                       timesync_svc->state = state;
+                       wake_up(&timesync_svc->wait_queue);
+               }
+               break;
+       }
+
+       if (WARN_ON(timesync_svc->state != state)) {
+               pr_err("Invalid state transition %d=>%d\n",
+                      timesync_svc->state, state);
+       }
+}
+
+static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
+                                        int state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+       gb_timesync_set_state(timesync_svc, state);
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+}
+
+static u64 gb_timesync_diff(u64 x, u64 y)
+{
+       if (x > y)
+               return x - y;
+       else
+               return y - x;
+}
+
+static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
+                                     u64 svc_frame_time, u64 ap_frame_time)
+{
+       if (svc_frame_time > ap_frame_time) {
+               svc->frame_time_offset = svc_frame_time - ap_frame_time;
+               svc->offset_down = false;
+       } else {
+               svc->frame_time_offset = ap_frame_time - svc_frame_time;
+               svc->offset_down = true;
+       }
+}
+
+/*
+ * Associate a FrameTime with a ktime timestamp represented as struct timespec.
+ * Requires the calling context to hold timesync_svc->mutex.
+ */
+static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
+                                   struct timespec ts, u64 frame_time)
+{
+       timesync_svc->ktime_data.ts = ts;
+       timesync_svc->ktime_data.frame_time = frame_time;
+}
+
+/*
+ * Find the two pulses that best match our expected inter-strobe gap and
+ * then calculate the difference between the SVC time at the second pulse
+ * and the local time at the second pulse.
+ */
+static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
+                                          u64 *frame_time)
+{
+       int i = 0;
+       u64 delta, ap_frame_time;
+       u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
+       u64 least = 0;
+
+       for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
+               delta = timesync_svc->strobe_data[i].frame_time -
+                       timesync_svc->strobe_data[i - 1].frame_time;
+               delta *= gb_timesync_ns_per_clock;
+               delta = gb_timesync_diff(delta, strobe_delay_ns);
+
+               if (!least || delta < least) {
+                       least = delta;
+                       gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
+                                                 timesync_svc->strobe_data[i].frame_time);
+
+                       ap_frame_time = timesync_svc->strobe_data[i].frame_time;
+                       ap_frame_time = gb_timesync_adjust_count(timesync_svc,
+                                                                ap_frame_time);
+                       gb_timesync_store_ktime(timesync_svc,
+                                               timesync_svc->strobe_data[i].ts,
+                                               ap_frame_time);
+
+                       pr_debug("adjust %s local %llu svc %llu delta %llu\n",
+                                timesync_svc->offset_down ? "down" : "up",
+                                timesync_svc->strobe_data[i].frame_time,
+                                frame_time[i], delta);
+               }
+       }
+}
+
+static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
+{
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_interface *interface;
+       struct gb_host_device *hd;
+       int ret;
+
+       list_for_each_entry(timesync_interface,
+                           &timesync_svc->interface_list, list) {
+               interface = timesync_interface->interface;
+               ret = gb_interface_timesync_disable(interface);
+               if (ret) {
+                       dev_err(&interface->dev,
+                               "interface timesync_disable %d\n", ret);
+               }
+       }
+
+       hd = timesync_svc->timesync_hd->hd;
+       ret = hd->driver->timesync_disable(hd);
+       if (ret < 0) {
+               dev_err(&hd->dev, "host timesync_disable %d\n",
+                       ret);
+       }
+
+       gb_svc_timesync_wake_pins_release(svc);
+       gb_svc_timesync_disable(svc);
+       gb_timesync_platform_unlock_bus();
+
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+}
+
+static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
+                                               *timesync_svc, int ret)
+{
+       if (ret == -EAGAIN) {
+               gb_timesync_set_state(timesync_svc, timesync_svc->state);
+       } else {
+               pr_err("Failed to lock timesync bus %d\n", ret);
+               gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+       }
+}
+
+static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
+{
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_host_device *hd;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_interface *interface;
+       u64 init_frame_time;
+       unsigned long clock_rate = gb_timesync_clock_rate;
+       int ret;
+
+       /*
+        * Get access to the wake pins in the AP and SVC
+        * Release these pins either in gb_timesync_teardown() or in
+        * gb_timesync_authoritative()
+        */
+       ret = gb_timesync_platform_lock_bus(timesync_svc);
+       if (ret < 0) {
+               gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
+               return;
+       }
+       ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "gb_svc_timesync_wake_pins_acquire %d\n", ret);
+               gb_timesync_teardown(timesync_svc);
+               return;
+       }
+
+       /* Choose an initial time in the future */
+       init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;
+
+       /* Send enable command to all relevant participants */
+       list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
+                           list) {
+               interface = timesync_interface->interface;
+               ret = gb_interface_timesync_enable(interface,
+                                                  GB_TIMESYNC_MAX_STROBES,
+                                                  init_frame_time,
+                                                  GB_TIMESYNC_STROBE_DELAY_US,
+                                                  clock_rate);
+               if (ret) {
+                       dev_err(&interface->dev,
+                               "interface timesync_enable %d\n", ret);
+               }
+       }
+
+       hd = timesync_svc->timesync_hd->hd;
+       ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
+                                         init_frame_time,
+                                         GB_TIMESYNC_STROBE_DELAY_US,
+                                         clock_rate);
+       if (ret < 0) {
+               dev_err(&hd->dev, "host timesync_enable %d\n",
+                       ret);
+       }
+
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
+       ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
+                                    init_frame_time,
+                                    GB_TIMESYNC_STROBE_DELAY_US,
+                                    clock_rate);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "gb_svc_timesync_enable %d\n", ret);
+               gb_timesync_teardown(timesync_svc);
+               return;
+       }
+
+       /* Schedule a timeout waiting for SVC to complete strobing */
+       gb_timesync_schedule_svc_timeout(timesync_svc);
+}
+
+static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
+{
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_host_device *hd;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_interface *interface;
+       u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
+       int ret;
+
+       /* Get authoritative time from SVC and adjust local clock */
+       ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "gb_svc_timesync_authoritative %d\n", ret);
+               gb_timesync_teardown(timesync_svc);
+               return;
+       }
+       gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);
+
+       /* Transmit authoritative time to downstream slaves */
+       hd = timesync_svc->timesync_hd->hd;
+       ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
+       if (ret < 0)
+               dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);
+
+       list_for_each_entry(timesync_interface,
+                           &timesync_svc->interface_list, list) {
+               interface = timesync_interface->interface;
+               ret = gb_interface_timesync_authoritative(
+                                               interface,
+                                               svc_frame_time);
+               if (ret) {
+                       dev_err(&interface->dev,
+                               "interface timesync_authoritative %d\n", ret);
+               }
+       }
+
+       /* Release wake pins */
+       gb_svc_timesync_wake_pins_release(svc);
+       gb_timesync_platform_unlock_bus();
+
+       /* Transition to state ACTIVE */
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
+
+       /* Schedule a ping to verify the synchronized system time */
+       timesync_svc->print_ping = true;
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
+}
+
+static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
+{
+       int ret = -EINVAL;
+
+       switch (timesync_svc->state) {
+       case GB_TIMESYNC_STATE_INVALID:
+       case GB_TIMESYNC_STATE_INACTIVE:
+               ret = -ENODEV;
+               break;
+       case GB_TIMESYNC_STATE_INIT:
+       case GB_TIMESYNC_STATE_WAIT_SVC:
+       case GB_TIMESYNC_STATE_AUTHORITATIVE:
+               ret = -EAGAIN;
+               break;
+       case GB_TIMESYNC_STATE_PING:
+       case GB_TIMESYNC_STATE_ACTIVE:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/*
+ * This routine takes a FrameTime and derives the difference with respect
+ * to a reference FrameTime/ktime pair. It then returns the calculated
+ * ktime based on the difference between the supplied FrameTime and
+ * the reference FrameTime.
+ *
+ * The time difference is calculated to six decimal places. Taking 19.2MHz
+ * as an example this means we have 52.083333~ nanoseconds per clock or
+ * 52083333~ femtoseconds per clock.
+ *
+ * Naively taking the count difference and converting to
+ * seconds/nanoseconds would quickly see the 0.0833 component produce
+ * noticeable errors. For example a time difference of one second would
+ * lose 19200000 * 0.08333x nanoseconds, or roughly 1.6 milliseconds.
+ *
+ * In contrast calculating in femtoseconds the same example of 19200000 *
+ * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
+ *
+ * Continuing the example of 19.2 MHz we cap the maximum error difference
+ * at a worst-case 0.3 microseconds over a potential calculation window of
+ * about 15 seconds, meaning you can convert a FrameTime that is <= 15
+ * seconds older/younger than the reference time with a maximum error of
+ * 0.2385 microseconds. Note 19.2MHz is an example frequency, not a
+ * requirement.
+ */
+static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
+                                  u64 frame_time, struct timespec *ts)
+{
+       unsigned long flags;
+       u64 delta_fs, counts, sec, nsec;
+       bool add;
+       int ret = 0;
+
+       memset(ts, 0x00, sizeof(*ts));
+       mutex_lock(&timesync_svc->mutex);
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+       ret = __gb_timesync_get_status(timesync_svc);
+       if (ret)
+               goto done;
+
+       /* Support calculating ktime upwards or downwards from the reference */
+       if (frame_time < timesync_svc->ktime_data.frame_time) {
+               add = false;
+               counts = timesync_svc->ktime_data.frame_time - frame_time;
+       } else {
+               add = true;
+               counts = frame_time - timesync_svc->ktime_data.frame_time;
+       }
+
+       /* Enforce the ~0.23 microsecond boundary @ 19.2MHz */
+       if (counts > gb_timesync_max_ktime_diff) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       /* Determine the time difference in femtoseconds */
+       delta_fs = counts * gb_timesync_fs_per_clock;
+
+       /* Convert to seconds */
+       sec = delta_fs;
+       do_div(sec, NSEC_PER_SEC);
+       do_div(sec, 1000000UL);
+
+       /* Get the nanosecond remainder */
+       nsec = do_div(delta_fs, sec);
+       do_div(nsec, 1000000UL);
+
+       if (add) {
+               /* Add the calculated offset - overflow nanoseconds upwards */
+               ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
+               ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
+               if (ts->tv_nsec >= NSEC_PER_SEC) {
+                       ts->tv_sec++;
+                       ts->tv_nsec -= NSEC_PER_SEC;
+               }
+       } else {
+               /* Subtract the difference over/underflow as necessary */
+               if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
+                       sec++;
+                       nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
+                       nsec = do_div(nsec, NSEC_PER_SEC);
+               } else {
+                       nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
+               }
+               /* Cannot return a negative second value */
+               if (sec > timesync_svc->ktime_data.ts.tv_sec) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
+               ts->tv_nsec = nsec;
+       }
+done:
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+       mutex_unlock(&timesync_svc->mutex);
+       return ret;
+}
+
+static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
+                                        char *buf, size_t buflen)
+{
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_host_device *hd;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_interface *interface;
+       unsigned int len;
+       size_t off;
+
+       /* AP/SVC */
+       off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
+                      greybus_bus_type.name,
+                      timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
+                      timesync_svc->svc_ping_frame_time);
+       len = buflen - off;
+
+       /* APB/GPB */
+       if (len < buflen) {
+               hd = timesync_svc->timesync_hd->hd;
+               off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
+                               timesync_svc->timesync_hd->ping_frame_time);
+               len = buflen - off;
+       }
+
+       list_for_each_entry(timesync_interface,
+                           &timesync_svc->interface_list, list) {
+               if (len < buflen) {
+                       interface = timesync_interface->interface;
+                       off += snprintf(&buf[off], len, "%s=%llu ",
+                                       dev_name(&interface->dev),
+                                       timesync_interface->ping_frame_time);
+                       len = buflen - off;
+               }
+       }
+       if (len < buflen)
+               off += snprintf(&buf[off], len, "\n");
+       return off;
+}
+
+static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
+                                         char *buf, size_t buflen)
+{
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_host_device *hd;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_interface *interface;
+       struct timespec ts;
+       unsigned int len;
+       size_t off;
+
+       /* AP */
+       gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
+                               &ts);
+       off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
+                      greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
+       len = buflen - off;
+       if (len >= buflen)
+               goto done;
+
+       /* SVC */
+       gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
+                               &ts);
+       off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
+                       ts.tv_sec, ts.tv_nsec);
+       len = buflen - off;
+       if (len >= buflen)
+               goto done;
+
+       /* APB/GPB */
+       hd = timesync_svc->timesync_hd->hd;
+       gb_timesync_to_timespec(timesync_svc,
+                               timesync_svc->timesync_hd->ping_frame_time,
+                               &ts);
+       off += snprintf(&buf[off], len, "%s=%lu.%lu ",
+                       dev_name(&hd->dev),
+                       ts.tv_sec, ts.tv_nsec);
+       len = buflen - off;
+       if (len >= buflen)
+               goto done;
+
+       list_for_each_entry(timesync_interface,
+                           &timesync_svc->interface_list, list) {
+               interface = timesync_interface->interface;
+               gb_timesync_to_timespec(timesync_svc,
+                                       timesync_interface->ping_frame_time,
+                                       &ts);
+               off += snprintf(&buf[off], len, "%s=%lu.%lu ",
+                               dev_name(&interface->dev),
+                               ts.tv_sec, ts.tv_nsec);
+               len = buflen - off;
+               if (len >= buflen)
+                       goto done;
+       }
+       off += snprintf(&buf[off], len, "\n");
+done:
+       return off;
+}
+
+/*
+ * Send an SVC initiated wake 'ping' to each TimeSync participant.
+ * Get the FrameTime from each participant associated with the wake
+ * ping.
+ */
+static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
+{
+       struct gb_svc *svc = timesync_svc->svc;
+       struct gb_host_device *hd;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_control *control;
+       u64 *ping_frame_time;
+       int ret;
+
+       /* Get access to the wake pins in the AP and SVC */
+       ret = gb_timesync_platform_lock_bus(timesync_svc);
+       if (ret < 0) {
+               gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
+               return;
+       }
+       ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "gb_svc_timesync_wake_pins_acquire %d\n", ret);
+               gb_timesync_teardown(timesync_svc);
+               return;
+       }
+
+       /* Have SVC generate a timesync ping */
+       timesync_svc->capture_ping = true;
+       timesync_svc->svc_ping_frame_time = 0;
+       ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
+       timesync_svc->capture_ping = false;
+       if (ret) {
+               dev_err(&svc->dev,
+                       "gb_svc_timesync_ping %d\n", ret);
+               gb_timesync_teardown(timesync_svc);
+               return;
+       }
+
+       /* Get the ping FrameTime from each APB/GPB */
+       hd = timesync_svc->timesync_hd->hd;
+       timesync_svc->timesync_hd->ping_frame_time = 0;
+       ret = hd->driver->timesync_get_last_event(hd,
+               &timesync_svc->timesync_hd->ping_frame_time);
+       if (ret)
+               dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);
+
+       list_for_each_entry(timesync_interface,
+                           &timesync_svc->interface_list, list) {
+               control = timesync_interface->interface->control;
+               timesync_interface->ping_frame_time = 0;
+               ping_frame_time = &timesync_interface->ping_frame_time;
+               ret = gb_control_timesync_get_last_event(control,
+                                                        ping_frame_time);
+               if (ret) {
+                       dev_err(&timesync_interface->interface->dev,
+                               "gb_control_timesync_get_last_event %d\n", ret);
+               }
+       }
+
+       /* Ping success - move to timesync active */
+       gb_svc_timesync_wake_pins_release(svc);
+       gb_timesync_platform_unlock_bus();
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
+}
+
+static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
+{
+       char *buf;
+
+       if (!timesync_svc->print_ping)
+               return;
+
+       buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (buf) {
+               gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
+               dev_dbg(&timesync_svc->svc->dev, "%s", buf);
+               kfree(buf);
+       }
+}
+
+/*
+ * Perform the actual work of scheduled TimeSync logic.
+ */
+static void gb_timesync_worker(struct work_struct *work)
+{
+       struct delayed_work *delayed_work = to_delayed_work(work);
+       struct gb_timesync_svc *timesync_svc =
+               container_of(delayed_work, struct gb_timesync_svc, delayed_work);
+
+       mutex_lock(&timesync_svc->mutex);
+
+       switch (timesync_svc->state) {
+       case GB_TIMESYNC_STATE_INIT:
+               gb_timesync_enable(timesync_svc);
+               break;
+
+       case GB_TIMESYNC_STATE_WAIT_SVC:
+               dev_err(&timesync_svc->svc->dev,
+                       "timeout SVC strobe completion %d/%d\n",
+                       timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
+               gb_timesync_teardown(timesync_svc);
+               break;
+
+       case GB_TIMESYNC_STATE_AUTHORITATIVE:
+               gb_timesync_authoritative(timesync_svc);
+               break;
+
+       case GB_TIMESYNC_STATE_PING:
+               gb_timesync_ping(timesync_svc);
+               gb_timesync_log_ping_time(timesync_svc);
+               break;
+
+       default:
+               pr_err("Invalid state %d for delayed work\n",
+                      timesync_svc->state);
+               break;
+       }
+
+       mutex_unlock(&timesync_svc->mutex);
+}
+
+/*
+ * Schedule a new TimeSync INIT or PING operation serialized with respect to
+ * gb_timesync_worker().
+ */
+static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
+{
+       int ret = 0;
+
+       if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
+               return -EINVAL;
+
+       mutex_lock(&timesync_svc->mutex);
+       if (timesync_svc->state !=  GB_TIMESYNC_STATE_INVALID) {
+               gb_timesync_set_state_atomic(timesync_svc, state);
+       } else {
+               ret = -ENODEV;
+       }
+       mutex_unlock(&timesync_svc->mutex);
+       return ret;
+}
+
+static int __gb_timesync_schedule_synchronous(
+       struct gb_timesync_svc *timesync_svc, int state)
+{
+       unsigned long flags;
+       int ret;
+
+       ret = gb_timesync_schedule(timesync_svc, state);
+       if (ret)
+               return ret;
+
+       ret = wait_event_interruptible(timesync_svc->wait_queue,
+                       (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
+                        timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
+                        timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
+       if (ret)
+               return ret;
+
+       mutex_lock(&timesync_svc->mutex);
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+       ret = __gb_timesync_get_status(timesync_svc);
+
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+       mutex_unlock(&timesync_svc->mutex);
+
+       return ret;
+}
+
+static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
+       struct gb_host_device *hd)
+{
+       struct gb_timesync_svc *timesync_svc;
+
+       list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
+               if (timesync_svc->svc == hd->svc)
+                       return timesync_svc;
+       }
+       return NULL;
+}
+
+static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
+       struct gb_timesync_svc *timesync_svc,
+       struct gb_interface *interface)
+{
+       struct gb_timesync_interface *timesync_interface;
+
+       list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
+               if (timesync_interface->interface == interface)
+                       return timesync_interface;
+       }
+       return NULL;
+}
+
+int gb_timesync_schedule_synchronous(struct gb_interface *interface)
+{
+       int ret;
+       struct gb_timesync_svc *timesync_svc;
+       int retries;
+
+       if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+               return 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
+               timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+               if (!timesync_svc) {
+                       ret = -ENODEV;
+                       goto done;
+               }
+
+               ret = __gb_timesync_schedule_synchronous(timesync_svc,
+                                                GB_TIMESYNC_STATE_INIT);
+               if (!ret)
+                       break;
+       }
+       if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
+               ret = -ETIMEDOUT;
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
+
+void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
+{
+       struct gb_timesync_svc *timesync_svc;
+
+       if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+               return;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+       if (!timesync_svc)
+               goto done;
+
+       gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
+
+static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
+                                    size_t len, loff_t *offset, bool ktime)
+{
+       struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
+       char *buf;
+       ssize_t ret = 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       mutex_lock(&timesync_svc->mutex);
+       if (list_empty(&timesync_svc->interface_list))
+               ret = -ENODEV;
+       timesync_svc->print_ping = false;
+       mutex_unlock(&timesync_svc->mutex);
+       if (ret)
+               goto done;
+
+       ret = __gb_timesync_schedule_synchronous(timesync_svc,
+                                                GB_TIMESYNC_STATE_PING);
+       if (ret)
+               goto done;
+
+       buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       if (ktime)
+               ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
+       else
+               ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
+       if (ret > 0)
+               ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
+       kfree(buf);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+
+static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
+                                               char __user *buf,
+                                               size_t len, loff_t *offset)
+{
+       return gb_timesync_ping_read(file, buf, len, offset, false);
+}
+
+static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
+                                                char __user *buf,
+                                                size_t len, loff_t *offset)
+{
+       return gb_timesync_ping_read(file, buf, len, offset, true);
+}
+
+static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
+       .read           = gb_timesync_ping_read_frame_time,
+};
+
+static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
+       .read           = gb_timesync_ping_read_frame_ktime,
+};
+
+static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
+                             struct gb_host_device *hd)
+{
+       struct gb_timesync_host_device *timesync_hd;
+
+       timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
+       if (!timesync_hd)
+               return -ENOMEM;
+
+       WARN_ON(timesync_svc->timesync_hd);
+       timesync_hd->hd = hd;
+       timesync_svc->timesync_hd = timesync_hd;
+
+       return 0;
+}
+
+static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
+                                 struct gb_host_device *hd)
+{
+       if (timesync_svc->timesync_hd->hd == hd) {
+               kfree(timesync_svc->timesync_hd);
+               timesync_svc->timesync_hd = NULL;
+               return;
+       }
+       WARN_ON(1);
+}
+
+int gb_timesync_svc_add(struct gb_svc *svc)
+{
+       struct gb_timesync_svc *timesync_svc;
+       int ret;
+
+       timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
+       if (!timesync_svc)
+               return -ENOMEM;
+
+       timesync_svc->work_queue =
+               create_singlethread_workqueue("gb-timesync-work_queue");
+
+       if (!timesync_svc->work_queue) {
+               kfree(timesync_svc);
+               return -ENOMEM;
+       }
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       INIT_LIST_HEAD(&timesync_svc->interface_list);
+       INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
+       mutex_init(&timesync_svc->mutex);
+       spin_lock_init(&timesync_svc->spinlock);
+       init_waitqueue_head(&timesync_svc->wait_queue);
+
+       timesync_svc->svc = svc;
+       timesync_svc->frame_time_offset = 0;
+       timesync_svc->capture_ping = false;
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+
+       timesync_svc->frame_time_dentry =
+               debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
+                                   timesync_svc,
+                                   &gb_timesync_debugfs_frame_time_ops);
+       timesync_svc->frame_ktime_dentry =
+               debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
+                                   timesync_svc,
+                                   &gb_timesync_debugfs_frame_ktime_ops);
+
+       list_add(&timesync_svc->list, &gb_timesync_svc_list);
+       ret = gb_timesync_hd_add(timesync_svc, svc->hd);
+       if (ret) {
+               list_del(&timesync_svc->list);
+               debugfs_remove(timesync_svc->frame_ktime_dentry);
+               debugfs_remove(timesync_svc->frame_time_dentry);
+               destroy_workqueue(timesync_svc->work_queue);
+               kfree(timesync_svc);
+               goto done;
+       }
+
+       init_timer(&timesync_svc->ktime_timer);
+       timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
+       timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
+       timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
+       add_timer(&timesync_svc->ktime_timer);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
+
+void gb_timesync_svc_remove(struct gb_svc *svc)
+{
+       struct gb_timesync_svc *timesync_svc;
+       struct gb_timesync_interface *timesync_interface;
+       struct gb_timesync_interface *next;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+       if (!timesync_svc)
+               goto done;
+
+       cancel_delayed_work_sync(&timesync_svc->delayed_work);
+
+       mutex_lock(&timesync_svc->mutex);
+
+       gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
+       del_timer_sync(&timesync_svc->ktime_timer);
+       gb_timesync_teardown(timesync_svc);
+
+       gb_timesync_hd_remove(timesync_svc, svc->hd);
+       list_for_each_entry_safe(timesync_interface, next,
+                                &timesync_svc->interface_list, list) {
+               list_del(&timesync_interface->list);
+               kfree(timesync_interface);
+       }
+       debugfs_remove(timesync_svc->frame_ktime_dentry);
+       debugfs_remove(timesync_svc->frame_time_dentry);
+       destroy_workqueue(timesync_svc->work_queue);
+       list_del(&timesync_svc->list);
+
+       mutex_unlock(&timesync_svc->mutex);
+
+       kfree(timesync_svc);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+}
+EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
+
+/*
+ * Add a Greybus Interface to the set of TimeSync Interfaces.
+ */
+int gb_timesync_interface_add(struct gb_interface *interface)
+{
+       struct gb_timesync_svc *timesync_svc;
+       struct gb_timesync_interface *timesync_interface;
+       int ret = 0;
+
+       if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+               return 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+       if (!timesync_svc) {
+               ret = -ENODEV;
+               goto done;
+       }
+
+       timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
+       if (!timesync_interface) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       mutex_lock(&timesync_svc->mutex);
+       timesync_interface->interface = interface;
+       list_add(&timesync_interface->list, &timesync_svc->interface_list);
+       timesync_svc->strobe_mask |= 1 << interface->interface_id;
+       mutex_unlock(&timesync_svc->mutex);
+
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
+
+/*
+ * Remove a Greybus Interface from the set of TimeSync Interfaces.
+ */
+void gb_timesync_interface_remove(struct gb_interface *interface)
+{
+       struct gb_timesync_svc *timesync_svc;
+       struct gb_timesync_interface *timesync_interface;
+
+       if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+               return;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+       if (!timesync_svc)
+               goto done;
+
+       timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
+                                                                interface);
+       if (!timesync_interface)
+               goto done;
+
+       mutex_lock(&timesync_svc->mutex);
+       timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
+       list_del(&timesync_interface->list);
+       kfree(timesync_interface);
+       mutex_unlock(&timesync_svc->mutex);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+}
+EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
+
+/*
+ * Give the authoritative FrameTime to the calling function. Returns zero if we
+ * are not in GB_TIMESYNC_STATE_ACTIVE.
+ */
+static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
+{
+       unsigned long flags;
+       u64 ret;
+
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+       if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
+               ret = __gb_timesync_get_frame_time(timesync_svc);
+       else
+               ret = 0;
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+       return ret;
+}
+
+u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
+{
+       struct gb_timesync_svc *timesync_svc;
+       u64 ret = 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+       if (!timesync_svc)
+               goto done;
+
+       ret = gb_timesync_get_frame_time(timesync_svc);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
+
+u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
+{
+       struct gb_timesync_svc *timesync_svc;
+       u64 ret = 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+       if (!timesync_svc)
+               goto done;
+
+       ret = gb_timesync_get_frame_time(timesync_svc);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
+
+/* Incrementally updates the conversion base from FrameTime to ktime */
+static void gb_timesync_ktime_timer_fn(unsigned long data)
+{
+       struct gb_timesync_svc *timesync_svc =
+               (struct gb_timesync_svc *)data;
+       unsigned long flags;
+       u64 frame_time;
+       struct timespec ts;
+
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+       if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
+               goto done;
+
+       ktime_get_ts(&ts);
+       frame_time = __gb_timesync_get_frame_time(timesync_svc);
+       gb_timesync_store_ktime(timesync_svc, ts, frame_time);
+
+done:
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+       mod_timer(&timesync_svc->ktime_timer,
+                 jiffies + GB_TIMESYNC_KTIME_UPDATE);
+}
+
+int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
+                                  struct timespec *ts)
+{
+       struct gb_timesync_svc *timesync_svc;
+       int ret = 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+       if (!timesync_svc) {
+               ret = -ENODEV;
+               goto done;
+       }
+       ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
+
+int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
+                                        u64 frame_time, struct timespec *ts)
+{
+       struct gb_timesync_svc *timesync_svc;
+       int ret = 0;
+
+       mutex_lock(&gb_timesync_svc_list_mutex);
+       timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+       if (!timesync_svc) {
+               ret = -ENODEV;
+               goto done;
+       }
+
+       ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
+done:
+       mutex_unlock(&gb_timesync_svc_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
+
+void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
+{
+       unsigned long flags;
+       u64 strobe_time;
+       bool strobe_is_ping = true;
+       struct timespec ts;
+
+       ktime_get_ts(&ts);
+       strobe_time = __gb_timesync_get_frame_time(timesync_svc);
+
+       spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+       if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
+               if (!timesync_svc->capture_ping)
+                       goto done_nolog;
+               timesync_svc->ap_ping_frame_time = strobe_time;
+               goto done_log;
+       } else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
+               goto done_nolog;
+       }
+
+       timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
+       timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;
+
+       if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
+               gb_timesync_set_state(timesync_svc,
+                                     GB_TIMESYNC_STATE_AUTHORITATIVE);
+       }
+       strobe_is_ping = false;
+done_log:
+       trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
+                             GB_TIMESYNC_MAX_STROBES, strobe_time);
+done_nolog:
+       spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+}
+EXPORT_SYMBOL(gb_timesync_irq);
+
+int __init gb_timesync_init(void)
+{
+       int ret = 0;
+
+       ret = gb_timesync_platform_init();
+       if (ret) {
+               pr_err("timesync platform init fail!\n");
+               return ret;
+       }
+
+       gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();
+
+       /* Calculate nanoseconds and femtoseconds per clock */
+       gb_timesync_fs_per_clock = FSEC_PER_SEC;
+       do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
+       gb_timesync_ns_per_clock = NSEC_PER_SEC;
+       do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
+
+       /* Calculate the maximum number of clocks we will convert to ktime */
+       gb_timesync_max_ktime_diff =
+               GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;
+
+       pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
+               gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
+       return 0;
+}
+
+void gb_timesync_exit(void)
+{
+       gb_timesync_platform_exit();
+}
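
A small standalone illustration of the femtosecond bookkeeping described in the
conversion comment above, using the 19.2MHz example clock; this is plain
userspace C for illustration only, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define FSEC_PER_SEC 1000000000000000ULL
	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t rate = 19200000ULL;                  /* 19.2MHz example */
		uint64_t ns_per_clock = NSEC_PER_SEC / rate;  /* 52 ns, truncated */
		uint64_t fs_per_clock = FSEC_PER_SEC / rate;  /* 52083333 fs */
		uint64_t counts = rate;                       /* one second of clocks */

		/* Naive nanosecond conversion drops the 0.0833 ns fraction per clock */
		uint64_t naive_ns = counts * ns_per_clock;    /* 998400000 ns */

		/* Femtosecond conversion keeps the error to a handful of nanoseconds */
		uint64_t delta_ns = (counts * fs_per_clock) / 1000000ULL;

		printf("naive error: %llu ns\n",
		       (unsigned long long)(NSEC_PER_SEC - naive_ns));  /* ~1.6 ms */
		printf("fs error:    %llu ns\n",
		       (unsigned long long)(NSEC_PER_SEC - delta_ns));  /* ~7 ns */
		return 0;
	}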
diff --git a/drivers/staging/greybus/timesync.h b/drivers/staging/greybus/timesync.h
new file mode 100644 (file)
index 0000000..72fc9a3
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __TIMESYNC_H
+#define __TIMESYNC_H
+
+struct gb_svc;
+struct gb_interface;
+struct gb_timesync_svc;
+
+/* Platform */
+u64 gb_timesync_platform_get_counter(void);
+u32 gb_timesync_platform_get_clock_rate(void);
+int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata);
+void gb_timesync_platform_unlock_bus(void);
+
+int gb_timesync_platform_init(void);
+void gb_timesync_platform_exit(void);
+
+/* Core API */
+int gb_timesync_interface_add(struct gb_interface *interface);
+void gb_timesync_interface_remove(struct gb_interface *interface);
+int gb_timesync_svc_add(struct gb_svc *svc);
+void gb_timesync_svc_remove(struct gb_svc *svc);
+
+u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface);
+u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc);
+int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
+                                  struct timespec *ts);
+int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
+                                        u64 frame_time, struct timespec *ts);
+
+int gb_timesync_schedule_synchronous(struct gb_interface *intf);
+void gb_timesync_schedule_asynchronous(struct gb_interface *intf);
+void gb_timesync_irq(struct gb_timesync_svc *timesync_svc);
+int gb_timesync_init(void);
+void gb_timesync_exit(void);
+
+#endif /* __TIMESYNC_H */
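
A hypothetical consumer sketch of the Core API declared above, timestamping an
event seen on a Greybus interface; the function name and log message are
illustrative and assume greybus.h and timesync.h are already included:

	static void example_stamp_event(struct gb_interface *intf)
	{
		struct timespec ts;
		u64 frame_time;

		/* Returns 0 unless TimeSync is in the ACTIVE state */
		frame_time = gb_timesync_get_frame_time_by_interface(intf);
		if (!frame_time)
			return;

		/* Map the FrameTime onto the AP's ktime-derived timespec */
		if (!gb_timesync_to_timespec_by_interface(intf, frame_time, &ts))
			dev_info(&intf->dev, "event at %lu.%09lu\n",
				 ts.tv_sec, ts.tv_nsec);
	}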
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
new file mode 100644 (file)
index 0000000..50e8883
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ * This code reads directly from an ARMv7 memory-mapped timer that lives in
+ * MMIO space. Since this counter lives in MMIO space it is shared between
+ * cores, which means we don't have to worry about issues like the TSC on
+ * x86, where each time-stamp counter (TSC) is local to a particular core.
+ *
+ * Register-level access code is based on
+ * drivers/clocksource/arm_arch_timer.c
+ */
+#include <linux/cpufreq.h>
+#include <linux/of_platform.h>
+
+#include "greybus.h"
+#include "arche_platform.h"
+
+static u32 gb_timesync_clock_frequency;
+int (*arche_platform_change_state_cb)(enum arche_platform_state state,
+                                     struct gb_timesync_svc *pdata);
+EXPORT_SYMBOL_GPL(arche_platform_change_state_cb);
+
+u64 gb_timesync_platform_get_counter(void)
+{
+       return (u64)get_cycles();
+}
+
+u32 gb_timesync_platform_get_clock_rate(void)
+{
+       if (unlikely(!gb_timesync_clock_frequency))
+               return cpufreq_get(0);
+
+       return gb_timesync_clock_frequency;
+}
+
+int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
+{
+       return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
+                                             pdata);
+}
+
+void gb_timesync_platform_unlock_bus(void)
+{
+       arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
+}
+
+static const struct of_device_id arch_timer_of_match[] = {
+       { .compatible   = "google,greybus-frame-time-counter", },
+       {},
+};
+
+int __init gb_timesync_platform_init(void)
+{
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, arch_timer_of_match);
+       if (!np) {
+               /* Tolerate not finding to allow BBB etc to continue */
+               pr_warn("Unable to find a compatible ARMv7 timer\n");
+               return 0;
+       }
+
+       if (of_property_read_u32(np, "clock-frequency",
+                                &gb_timesync_clock_frequency)) {
+               pr_err("Unable to find timer clock-frequency\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+void gb_timesync_platform_exit(void) {}
diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk
new file mode 100644 (file)
index 0000000..fdadbf6
--- /dev/null
@@ -0,0 +1,10 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= loopback_test.c
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := gb_loopback_test
+
+include $(BUILD_EXECUTABLE)
+
diff --git a/drivers/staging/greybus/tools/Makefile b/drivers/staging/greybus/tools/Makefile
new file mode 100644 (file)
index 0000000..852b12b
--- /dev/null
@@ -0,0 +1,31 @@
+ifeq ($(strip $(V)), 1)
+  Q =
+else
+  Q = @
+endif
+
+CFLAGS += -std=gnu99 -Wall -Wextra -g \
+           -D_GNU_SOURCE \
+           -Wno-unused-parameter \
+           -Wmaybe-uninitialized \
+           -Wredundant-decls \
+           -Wcast-align \
+           -Wsign-compare \
+           -Wno-missing-field-initializers
+
+CC     := $(CROSS_COMPILE)gcc
+
+TOOLS = loopback_test
+
+all: $(TOOLS)
+
+%.o: %.c ../greybus_protocols.h
+       @echo '  TARGET_CC $@'
+       $(Q)$(CC) $(CFLAGS) -c $< -o $@
+
+loopback_%: loopback_%.o
+       @echo '  TARGET_LD $@'
+       $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@
+
+clean::
+       rm -f *.o $(TOOLS)
diff --git a/drivers/staging/greybus/tools/README.loopback b/drivers/staging/greybus/tools/README.loopback
new file mode 100644 (file)
index 0000000..845b08d
--- /dev/null
@@ -0,0 +1,198 @@
+
+
+        1 - LOOPBACK DRIVER
+
+The driver implements the main logic of the loopback test and provides
+sysfs files to configure the test and retrieve the results.
+A user can run a test without the test application, provided they understand
+the sysfs interface of the loopback driver.
+
+The loopback kernel driver needs to be loaded and at least one module
+with the loopback feature enabled must be present for the sysfs files to be
+created and for the loopback test application to be able to run.
+
+To load the module:
+# modprobe gb-loopback
+
+
+When the module is probed, new files become available in the sysfs
+directory of the detected loopback device
+(typically under "/sys/bus/greybus/devices").
+
+Here is a short summary of the sysfs interface files that should be visible:
+
+* Loopback Configuration Files:
+    async - Use asynchronous operations.
+    iteration_max - Number of test iterations to perform.
+    size - Payload size of the transfer.
+    timeout - The number of microseconds to give an individual
+              asynchronous request before timing out.
+    us_wait - Time to wait between two messages, in microseconds.
+    type - By writing the test type to this file, the test starts.
+           Valid tests are:
+             0 - stop the test
+             2 - ping
+             3 - transfer
+             4 - sink
+
+* Loopback feedback files:
+    error - Number of errors that have occurred.
+    iteration_count - Number of iterations performed.
+    requests_completed - Number of requests successfully completed.
+    requests_timedout - Number of requests that have timed out.
+    timeout_max - Max allowed timeout.
+    timeout_min - Min allowed timeout.
+
+* Loopback result files:
+    apbridge_unipro_latency_avg
+    apbridge_unipro_latency_max
+    apbridge_unipro_latency_min
+    gpbridge_firmware_latency_avg
+    gpbridge_firmware_latency_max
+    gpbridge_firmware_latency_min
+    requests_per_second_avg
+    requests_per_second_max
+    requests_per_second_min
+    latency_avg
+    latency_max
+    latency_min
+    throughput_avg
+    throughput_max
+    throughput_min
+
+
+
+            2 - LOOPBACK TEST APPLICATION
+
+The loopback test application manages and formats the results provided by
+the loopback kernel module. The purpose of this application
+is to:
+    - Start and manage multiple loopback device tests concurrently.
+    - Calculate the aggregate results for multiple devices.
+    - Gather and format test results (csv or human readable).
+
+The best way to get up-to-date usage information for the application is
+usually to pass the "-h" parameter.
+Here is a summary of the available options:
+
+ Mandatory arguments
+   -t     must be one of the test names - sink, transfer or ping
+   -i     iteration count - the number of iterations to run the test over
+ Optional arguments
+   -S     sysfs location - location for greybus 'endo' entries, default /sys/bus/greybus/devices/
+   -D     debugfs location - location for loopback debugfs entries, default /sys/kernel/debug/gb_loopback/
+   -s     size of data packet to send during test - defaults to zero
+   -m     mask - a bit mask of connections to include, e.g. -m 8 = 4th connection only, -m 9 = 1st and 4th connections
+                 default is zero which means broadcast to all connections
+   -v     verbose output
+   -d     debug output
+   -r     raw data output - when specified the full list of latency values is included in the output CSV
+   -p     porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file
+   -a     aggregate - show aggregation of all enabled devices
+   -l     list found loopback devices and exit.
+   -x     Async - Enable async transfers.
+   -o     Timeout - Timeout in microseconds for async operations.
+
+
+
+             3 - REAL WORLD EXAMPLE USAGES
+
+ 3.1 - Using the driver sysfs files to run a test on a single device:
+
+* Run 1000 transfers of a 100-byte packet. Each transfer is started only
+after the previous one has finished successfully:
+    echo 0 > /sys/bus/greybus/devices/1-2.17/type
+    echo 0 > /sys/bus/greybus/devices/1-2.17/async
+    echo 2000 > /sys/bus/greybus/devices/1-2.17/us_wait
+    echo 100 > /sys/bus/greybus/devices/1-2.17/size
+    echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
+    echo 0 > /sys/bus/greybus/devices/1-2.17/mask
+    echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
+    echo 3 > /sys/bus/greybus/devices/1-2.17/type
+
+* Run 1000 transfers of a 100-byte packet. Transfers are started without
+waiting for the previous one to finish:
+    echo 0 > /sys/bus/greybus/devices/1-2.17/type
+    echo 3 > /sys/bus/greybus/devices/1-2.17/async
+    echo 0 > /sys/bus/greybus/devices/1-2.17/us_wait
+    echo 100 > /sys/bus/greybus/devices/1-2.17/size
+    echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
+    echo 0 > /sys/bus/greybus/devices/1-2.17/mask
+    echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
+    echo 3 > /sys/bus/greybus/devices/1-2.17/type
+
+* Read the results from sysfs:
+    cat /sys/bus/greybus/devices/1-2.17/requests_per_second_min
+    cat /sys/bus/greybus/devices/1-2.17/requests_per_second_max
+    cat /sys/bus/greybus/devices/1-2.17/requests_per_second_avg
+
+    cat /sys/bus/greybus/devices/1-2.17/latency_min
+    cat /sys/bus/greybus/devices/1-2.17/latency_max
+    cat /sys/bus/greybus/devices/1-2.17/latency_avg
+
+    cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_min
+    cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_max
+    cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_avg
+
+    cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_min
+    cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_max
+    cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_avg
+
+    cat /sys/bus/greybus/devices/1-2.17/error
+    cat /sys/bus/greybus/devices/1-2.17/requests_completed
+    cat /sys/bus/greybus/devices/1-2.17/requests_timedout
+
+
+ 3.2 - Using the test application:
+
+* Run a transfer test of 10 iterations with size 100 bytes on all available devices:
+    #/loopback_test -t transfer -i 10 -s 100
+    1970-1-1 0:10:7,transfer,1-4.17,100,10,0,443,509,471.700012,66,1963,2256,2124.600098,293,102776,118088,109318.898438,15312,1620,1998,1894.099976,378,56,57,56.799999,1
+    1970-1-1 0:10:7,transfer,1-5.17,100,10,0,399,542,463.399994,143,1845,2505,2175.800049,660,92568,125744,107393.296875,33176,1469,2305,1806.500000,836,56,57,56.799999,1
+
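+  (For reference, the CSV fields follow the order used by format_output()
+   in loopback_test.c further down in this patch: date, test, device, size,
+   iterations, errors, then min, max, average and jitter for requests per
+   second, latency, throughput, apbridge-unipro latency and gbphy (gpbridge)
+   firmware latency.)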
+
+* Show the aggregate results of both devices ("-a"):
+    #/loopback_test -t transfer -i 10 -s 100  -a
+    1970-1-1 0:10:35,transfer,1-4.17,100,10,0,448,580,494.100006,132,1722,2230,2039.400024,508,103936,134560,114515.703125,30624,1513,1980,1806.900024,467,56,57,57.299999,1
+    1970-1-1 0:10:35,transfer,1-5.17,100,10,0,383,558,478.600006,175,1791,2606,2115.199951,815,88856,129456,110919.703125,40600,1457,2246,1773.599976,789,56,57,57.099998,1
+    1970-1-1 0:10:35,transfer,aggregate,100,10,0,383,580,486.000000,197,1722,2606,2077.000000,884,88856,134560,112717.000000,45704,1457,2246,1789.000000,789,56,57,57.000000,1
+
+* Example usage of the mask option to select which devices will
+  run the test (1st, 2nd, or both devices):
+    # /loopback_test -t transfer -i 10 -s 100 -m 1
+    1970-1-1 0:11:56,transfer,1-4.17,100,10,0,514,558,544.900024,44,1791,1943,1836.599976,152,119248,129456,126301.296875,10208,1600,1001609,101613.601562,1000009,56,57,56.900002,1
+    # /loopback_test -t transfer -i 10 -s 100 -m 2
+    1970-1-1 0:12:0,transfer,1-5.17,100,10,0,468,554,539.000000,86,1804,2134,1859.500000,330,108576,128528,124932.500000,19952,1606,1626,1619.300049,20,56,57,57.400002,1
+    # /loopback_test -t transfer -i 10 -s 100 -m 3
+    1970-1-1 0:12:3,transfer,1-4.17,100,10,0,432,510,469.399994,78,1959,2313,2135.800049,354,100224,118320,108785.296875,18096,1610,2024,1893.500000,414,56,57,57.200001,1
+    1970-1-1 0:12:3,transfer,1-5.17,100,10,0,404,542,468.799988,138,1843,2472,2152.500000,629,93728,125744,108646.101562,32016,1504,2247,1853.099976,743,56,57,57.099998,1
+
+* Show output in human-readable format ("-p"):
+    # /loopback_test -t transfer -i 10 -s 100 -m 3 -p
+
+    1970-1-1 0:12:37
+    test:                       transfer
+    path:                       1-4.17
+    size:                       100
+    iterations:                 10
+    errors:                     0
+    async:                      Disabled
+    requests per-sec:           min=390, max=547, average=469.299988, jitter=157
+    ap-throughput B/s:          min=90480 max=126904 average=108762.101562 jitter=36424
+    ap-latency usec:            min=1826 max=2560 average=2146.000000 jitter=734
+    apbridge-latency usec:      min=1620 max=1982 average=1882.099976 jitter=362
+    gpbridge-latency usec:      min=56 max=57 average=57.099998 jitter=1
+
+
+    1970-1-1 0:12:37
+    test:                       transfer
+    path:                       1-5.17
+    size:                       100
+    iterations:                 10
+    errors:                     0
+    async:                      Disabled
+    requests per-sec:           min=397, max=538, average=461.700012, jitter=141
+    ap-throughput B/s:          min=92104 max=124816 average=106998.898438 jitter=32712
+    ap-latency usec:            min=1856 max=2514 average=2185.699951 jitter=658
+    apbridge-latency usec:      min=1460 max=2296 average=1828.599976 jitter=836
+    gpbridge-latency usec:      min=56 max=57 average=57.099998 jitter=1
diff --git a/drivers/staging/greybus/tools/lbtest b/drivers/staging/greybus/tools/lbtest
new file mode 100755 (executable)
index 0000000..d7353f1
--- /dev/null
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google, Inc.
+# Copyright (c) 2015 Linaro, Ltd.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import print_function
+import csv
+import datetime
+import sys
+import time
+
+dict = {'ping': '2', 'transfer': '3', 'sink': '4'}
+verbose = 1
+
+def abort():
+       sys.exit(1)
+
+def usage():
+       print('Usage: {0} TEST SIZE ITERATIONS PATH\n\n'
+       '  Run TEST for a number of ITERATIONS with operation data SIZE bytes\n'
+       '  TEST may be \'ping\' \'transfer\' or \'sink\'\n'
+       '  SIZE indicates the size of transfer <= greybus max payload bytes\n'
+       '  ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n'
+       '             Note if ITERATIONS is set to zero then this utility will\n'
+       '             initiate an infinite (non terminating) test and exit\n'
+       '             without logging any metrics data\n'
+       '  PATH indicates the sysfs path for the loopback greybus entries e.g.\n'
+       '             /sys/bus/greybus/devices/endo0:1:1:1:1/\n'
+       'Examples:\n'
+       '  looptest transfer 128 10000\n'
+       '  looptest ping 0 128\n'
+       '  looptest sink 2030 32768\n'
+       .format(sys.argv[0]), file=sys.stderr)
+
+       abort()
+
+def read_sysfs_int(path):
+       try:
+               f = open(path, "r")
+               val = f.read()
+               f.close()
+               return int(val)
+       except IOError as e:
+               print("I/O error({0}): {1}".format(e.errno, e.strerror))
+               print("Invalid path %s" % path)
+
+def write_sysfs_val(path, val):
+       try:
+               f = open(path, "r+")
+               f.write(val)
+               f.close()
+       except IOError as e:
+               print("I/O error({0}): {1}".format(e.errno, e.strerror))
+               print("Invalid path %s" % path)
+
+def log_csv(test_name, size, iteration_max, sys_pfx):
+       # file name will be test_name_size_iteration_max.csv
+       # every time the same test with the same parameters is run we will then
+       # append to the same CSV with datestamp - representing each test dataset
+       fname = test_name + '_' + size + '_' + str(iteration_max) + '.csv'
+
+       try:
+               # gather data set
+               date = str(datetime.datetime.now())
+               error = read_sysfs_int(sys_pfx + 'error')
+               request_min = read_sysfs_int(sys_pfx + 'requests_per_second_min')
+               request_max = read_sysfs_int(sys_pfx + 'requests_per_second_max')
+               request_avg = read_sysfs_int(sys_pfx + 'requests_per_second_avg')
+               latency_min = read_sysfs_int(sys_pfx + 'latency_min')
+               latency_max = read_sysfs_int(sys_pfx + 'latency_max')
+               latency_avg = read_sysfs_int(sys_pfx + 'latency_avg')
+               throughput_min = read_sysfs_int(sys_pfx + 'throughput_min')
+               throughput_max = read_sysfs_int(sys_pfx + 'throughput_max')
+               throughput_avg = read_sysfs_int(sys_pfx + 'throughput_avg')
+
+               # derive jitter
+               request_jitter = request_max - request_min
+               latency_jitter = latency_max - latency_min
+               throughput_jitter = throughput_max - throughput_min
+
+               # append data set to file
+               with open(fname, 'a') as csvf:
+                       row = csv.writer(csvf, delimiter=",", quotechar="'",
+                                       quoting=csv.QUOTE_MINIMAL)
+                       row.writerow([date, test_name, size, iteration_max, error,
+                                       request_min, request_max, request_avg, request_jitter,
+                                       latency_min, latency_max, latency_avg, latency_jitter,
+                                       throughput_min, throughput_max, throughput_avg, throughput_jitter])
+       except IOError as e:
+               print("I/O error({0}): {1}".format(e.errno, e.strerror))
+
+def loopback_run(test_name, size, iteration_max, sys_pfx):
+       test_id = dict[test_name]
+       try:
+               # Terminate any currently running test
+               write_sysfs_val(sys_pfx + 'type', '0')
+               # Set parameter for no wait between messages
+               write_sysfs_val(sys_pfx + 'us_wait', '0')
+               # Set operation size
+               write_sysfs_val(sys_pfx + 'size', size)
+               # Set iterations
+               write_sysfs_val(sys_pfx + 'iteration_max', str(iteration_max))
+               # Initiate by setting loopback operation type
+               write_sysfs_val(sys_pfx + 'type', test_id)
+               time.sleep(1)
+
+               if iteration_max == 0:
+                       print ("Infinite test initiated CSV won't be logged\n")
+                       return
+
+               previous = 0
+               err = 0
+               while True:
+                       # get current count, bail out if it hasn't changed
+                       iteration_count = read_sysfs_int(sys_pfx + 'iteration_count')
+                       if previous == iteration_count:
+                               err = 1
+                               break
+                       elif iteration_count == iteration_max:
+                               break
+                       previous = iteration_count
+                       if verbose:
+                               print('%02d%% complete %d of %d ' %
+                                       (100 * iteration_count / iteration_max,
+                                       iteration_count, iteration_max))
+                       time.sleep(1)
+               if err:
+                       print ('\nError executing test\n')
+               else:
+                       log_csv(test_name, size, iteration_max, sys_pfx)
+       except ValueError as ve:
+               print("Error: %s" % str(ve), file=sys.stderr)
+               abort()
+
+def main():
+       if len(sys.argv) < 5:
+               usage()
+
+       if sys.argv[1] in dict.keys():
+               loopback_run(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4])
+       else:
+               usage()
+if __name__ == '__main__':
+       main()
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
new file mode 100644 (file)
index 0000000..f7f4cd6
--- /dev/null
@@ -0,0 +1,1000 @@
+/*
+ * Loopback test application
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Provided under the three clause BSD license found in the LICENSE file.
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <signal.h>
+
+#define MAX_NUM_DEVICES 10
+#define MAX_SYSFS_PATH 0x200
+#define CSV_MAX_LINE   0x1000
+#define SYSFS_MAX_INT  0x20
+#define MAX_STR_LEN    255
+#define DEFAULT_ASYNC_TIMEOUT 200000
+
+struct dict {
+       char *name;
+       int type;
+};
+
+static struct dict dict[] = {
+       {"ping", 2},
+       {"transfer", 3},
+       {"sink", 4},
+       {NULL,}         /* list termination */
+};
+
+struct loopback_results {
+       float latency_avg;
+       uint32_t latency_max;
+       uint32_t latency_min;
+       uint32_t latency_jitter;
+
+       float request_avg;
+       uint32_t request_max;
+       uint32_t request_min;
+       uint32_t request_jitter;
+
+       float throughput_avg;
+       uint32_t throughput_max;
+       uint32_t throughput_min;
+       uint32_t throughput_jitter;
+
+       float apbridge_unipro_latency_avg;
+       uint32_t apbridge_unipro_latency_max;
+       uint32_t apbridge_unipro_latency_min;
+       uint32_t apbridge_unipro_latency_jitter;
+
+       float gbphy_firmware_latency_avg;
+       uint32_t gbphy_firmware_latency_max;
+       uint32_t gbphy_firmware_latency_min;
+       uint32_t gbphy_firmware_latency_jitter;
+
+       uint32_t error;
+};
+
+struct loopback_device {
+       char name[MAX_SYSFS_PATH];
+       char sysfs_entry[MAX_SYSFS_PATH];
+       char debugfs_entry[MAX_SYSFS_PATH];
+       struct loopback_results results;
+};
+
+struct loopback_test {
+       int verbose;
+       int debug;
+       int raw_data_dump;
+       int porcelain;
+       int mask;
+       int size;
+       int iteration_max;
+       int aggregate_output;
+       int test_id;
+       int device_count;
+       int list_devices;
+       int use_async;
+       int async_timeout;
+       int async_outstanding_operations;
+       int us_wait;
+       int file_output;
+       int stop_all;
+       int poll_count;
+       char test_name[MAX_STR_LEN];
+       char sysfs_prefix[MAX_SYSFS_PATH];
+       char debugfs_prefix[MAX_SYSFS_PATH];
+       struct timespec poll_timeout;
+       struct loopback_device devices[MAX_NUM_DEVICES];
+       struct loopback_results aggregate_results;
+       struct pollfd fds[MAX_NUM_DEVICES];
+};
+
+struct loopback_test t;
+
+/* Helper macros to calculate the aggregate results for all devices */
+static inline int device_enabled(struct loopback_test *t, int dev_idx);
+
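+/*
+ * For example, GET_MAX(throughput_max) defines
+ * get_throughput_max_aggregate(), which returns the largest
+ * results.throughput_max value among the enabled devices.
+ */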
+#define GET_MAX(field)                                                 \
+static int get_##field##_aggregate(struct loopback_test *t)            \
+{                                                                      \
+       uint32_t max = 0;                                               \
+       int i;                                                          \
+       for (i = 0; i < t->device_count; i++) {                         \
+               if (!device_enabled(t, i))                              \
+                       continue;                                       \
+               if (t->devices[i].results.field > max)                  \
+                       max = t->devices[i].results.field;              \
+       }                                                               \
+       return max;                                                     \
+}                                                                      \
+
+#define GET_MIN(field)                                                 \
+static int get_##field##_aggregate(struct loopback_test *t)            \
+{                                                                      \
+       uint32_t min = ~0;                                              \
+       int i;                                                          \
+       for (i = 0; i < t->device_count; i++) {                         \
+               if (!device_enabled(t, i))                              \
+                       continue;                                       \
+               if (t->devices[i].results.field < min)                  \
+                       min = t->devices[i].results.field;              \
+       }                                                               \
+       return min;                                                     \
+}                                                                      \
+
+#define GET_AVG(field)                                                 \
+static int get_##field##_aggregate(struct loopback_test *t)            \
+{                                                                      \
+       uint32_t val = 0;                                               \
+       uint32_t count = 0;                                             \
+       int i;                                                          \
+       for (i = 0; i < t->device_count; i++) {                         \
+               if (!device_enabled(t, i))                              \
+                       continue;                                       \
+               count++;                                                \
+               val += t->devices[i].results.field;                     \
+       }                                                               \
+       if (count)                                                      \
+               val /= count;                                           \
+       return val;                                                     \
+}                                                                      \
+
+GET_MAX(throughput_max);
+GET_MAX(request_max);
+GET_MAX(latency_max);
+GET_MAX(apbridge_unipro_latency_max);
+GET_MAX(gbphy_firmware_latency_max);
+GET_MIN(throughput_min);
+GET_MIN(request_min);
+GET_MIN(latency_min);
+GET_MIN(apbridge_unipro_latency_min);
+GET_MIN(gbphy_firmware_latency_min);
+GET_AVG(throughput_avg);
+GET_AVG(request_avg);
+GET_AVG(latency_avg);
+GET_AVG(apbridge_unipro_latency_avg);
+GET_AVG(gbphy_firmware_latency_avg);
+
+void abort()
+{
+       _exit(1);
+}
+
+void usage(void)
+{
+       fprintf(stderr, "Usage: loopback_test TEST [SIZE] ITERATIONS [SYSPATH] [DBGPATH]\n\n"
+       "  Run TEST for a number of ITERATIONS with operation data SIZE bytes\n"
+       "  TEST may be \'ping\' \'transfer\' or \'sink\'\n"
+       "  SIZE indicates the size of transfer <= greybus max payload bytes\n"
+       "  ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n"
+       "             Note if ITERATIONS is set to zero then this utility will\n"
+       "             initiate an infinite (non terminating) test and exit\n"
+       "             without logging any metrics data\n"
+       "  SYSPATH indicates the sysfs path for the loopback greybus entries e.g.\n"
+       "          /sys/bus/greybus/devices\n"
+       "  DBGPATH indicates the debugfs path for the loopback greybus entries e.g.\n"
+       "          /sys/kernel/debug/gb_loopback/\n"
+       " Mandatory arguments\n"
+       "   -t     must be one of the test names - sink, transfer or ping\n"
+       "   -i     iteration count - the number of iterations to run the test over\n"
+       " Optional arguments\n"
+       "   -S     sysfs location - location for greybus 'endo' entires default /sys/bus/greybus/devices/\n"
+       "   -D     debugfs location - location for loopback debugfs entries default /sys/kernel/debug/gb_loopback/\n"
+       "   -s     size of data packet to send during test - defaults to zero\n"
+       "   -m     mask - a bit mask of connections to include example: -m 8 = 4th connection -m 9 = 1st and 4th connection etc\n"
+       "                 default is zero which means broadcast to all connections\n"
+       "   -v     verbose output\n"
+       "   -d     debug output\n"
+       "   -r     raw data output - when specified the full list of latency values are included in the output CSV\n"
+       "   -p     porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file\n"
+       "   -a     aggregate - show aggregation of all enabled devices\n"
+       "   -l     list found loopback devices and exit\n"
+       "   -x     Async - Enable async transfers\n"
+       "   -o     Async Timeout - Timeout in uSec for async operations\n"
+       "   -O     Poll loop time out in seconds(max time a test is expected to last, default: 30sec)\n"
+       "   -c     Max number of outstanding operations for async operations\n"
+       "   -w     Wait in uSec between operations\n"
+       "   -z     Enable output to a CSV file (incompatible with -p)\n"
+       "   -f     When starting new loopback test, stop currently running tests on all devices\n"
+       "Examples:\n"
+       "  Send 10000 transfers with a packet size of 128 bytes to all active connections\n"
+       "  loopback_test -t transfer -s 128 -i 10000 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
+       "  loopback_test -t transfer -s 128 -i 10000 -m 0\n"
+       "  Send 10000 transfers with a packet size of 128 bytes to connection 1 and 4\n"
+       "  loopback_test -t transfer -s 128 -i 10000 -m 9\n"
+       "  loopback_test -t ping -s 0 128 -i -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
+       "  loopback_test -t sink -s 2030 -i 32768 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n");
+       abort();
+}
+
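+/*
+ * A device takes part in the test when no mask was given (mask == 0 means
+ * all devices) or when its bit is set in the mask (bit N selects the
+ * device at index N).
+ */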
+static inline int device_enabled(struct loopback_test *t, int dev_idx)
+{
+       if (!t->mask || (t->mask & (1 << dev_idx)))
+               return 1;
+
+       return 0;
+}
+
+static void show_loopback_devices(struct loopback_test *t)
+{
+       int i;
+
+       if (t->device_count == 0) {
+               printf("No loopback devices.\n");
+               return;
+       }
+
+       for (i = 0; i < t->device_count; i++)
+               printf("device[%d] = %s\n", i, t->devices[i].name);
+
+}
+
+int open_sysfs(const char *sys_pfx, const char *node, int flags)
+{
+       int fd;
+       char path[MAX_SYSFS_PATH];
+
+       snprintf(path, sizeof(path), "%s%s", sys_pfx, node);
+       fd = open(path, flags);
+       if (fd < 0) {
+               fprintf(stderr, "unable to open %s\n", path);
+               abort();
+       }
+       return fd;
+}
+
+int read_sysfs_int_fd(int fd, const char *sys_pfx, const char *node)
+{
+       char buf[SYSFS_MAX_INT];
+
+       if (read(fd, buf, sizeof(buf)) < 0) {
+               fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
+                       strerror(errno));
+               close(fd);
+               abort();
+       }
+       return atoi(buf);
+}
+
+float read_sysfs_float_fd(int fd, const char *sys_pfx, const char *node)
+{
+       char buf[SYSFS_MAX_INT];
+
+       if (read(fd, buf, sizeof(buf)) < 0) {
+
+               fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
+                       strerror(errno));
+               close(fd);
+               abort();
+       }
+       return atof(buf);
+}
+
+int read_sysfs_int(const char *sys_pfx, const char *node)
+{
+       int fd, val;
+
+       fd = open_sysfs(sys_pfx, node, O_RDONLY);
+       val = read_sysfs_int_fd(fd, sys_pfx, node);
+       close(fd);
+       return val;
+}
+
+float read_sysfs_float(const char *sys_pfx, const char *node)
+{
+       int fd;
+       float val;
+
+       fd = open_sysfs(sys_pfx, node, O_RDONLY);
+       val = read_sysfs_float_fd(fd, sys_pfx, node);
+       close(fd);
+       return val;
+}
+
+void write_sysfs_val(const char *sys_pfx, const char *node, int val)
+{
+       int fd, len;
+       char buf[SYSFS_MAX_INT];
+
+       fd = open_sysfs(sys_pfx, node, O_RDWR);
+       len = snprintf(buf, sizeof(buf), "%d", val);
+       if (write(fd, buf, len) < 0) {
+               fprintf(stderr, "unable to write to %s%s %s\n", sys_pfx, node,
+                       strerror(errno));
+               close(fd);
+               abort();
+       }
+       close(fd);
+}
+
+static int get_results(struct loopback_test *t)
+{
+       struct loopback_device *d;
+       struct loopback_results *r;
+       int i;
+
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               d = &t->devices[i];
+               r = &d->results;
+
+               r->error = read_sysfs_int(d->sysfs_entry, "error");
+               r->request_min = read_sysfs_int(d->sysfs_entry, "requests_per_second_min");
+               r->request_max = read_sysfs_int(d->sysfs_entry, "requests_per_second_max");
+               r->request_avg = read_sysfs_float(d->sysfs_entry, "requests_per_second_avg");
+
+               r->latency_min = read_sysfs_int(d->sysfs_entry, "latency_min");
+               r->latency_max = read_sysfs_int(d->sysfs_entry, "latency_max");
+               r->latency_avg = read_sysfs_float(d->sysfs_entry, "latency_avg");
+
+               r->throughput_min = read_sysfs_int(d->sysfs_entry, "throughput_min");
+               r->throughput_max = read_sysfs_int(d->sysfs_entry, "throughput_max");
+               r->throughput_avg = read_sysfs_float(d->sysfs_entry, "throughput_avg");
+
+               r->apbridge_unipro_latency_min =
+                       read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_min");
+               r->apbridge_unipro_latency_max =
+                       read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_max");
+               r->apbridge_unipro_latency_avg =
+                       read_sysfs_float(d->sysfs_entry, "apbridge_unipro_latency_avg");
+
+               r->gbphy_firmware_latency_min =
+                       read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_min");
+               r->gbphy_firmware_latency_max =
+                       read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_max");
+               r->gbphy_firmware_latency_avg =
+                       read_sysfs_float(d->sysfs_entry, "gbphy_firmware_latency_avg");
+
+               r->request_jitter = r->request_max - r->request_min;
+               r->latency_jitter = r->latency_max - r->latency_min;
+               r->throughput_jitter = r->throughput_max - r->throughput_min;
+               r->apbridge_unipro_latency_jitter =
+                       r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
+               r->gbphy_firmware_latency_jitter =
+                       r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;
+
+       }
+
+       /* calculate the aggregate results of all enabled devices */
+       if (t->aggregate_output) {
+               r = &t->aggregate_results;
+
+               r->request_min = get_request_min_aggregate(t);
+               r->request_max = get_request_max_aggregate(t);
+               r->request_avg = get_request_avg_aggregate(t);
+
+               r->latency_min = get_latency_min_aggregate(t);
+               r->latency_max = get_latency_max_aggregate(t);
+               r->latency_avg = get_latency_avg_aggregate(t);
+
+               r->throughput_min = get_throughput_min_aggregate(t);
+               r->throughput_max = get_throughput_max_aggregate(t);
+               r->throughput_avg = get_throughput_avg_aggregate(t);
+
+               r->apbridge_unipro_latency_min =
+                       get_apbridge_unipro_latency_min_aggregate(t);
+               r->apbridge_unipro_latency_max =
+                       get_apbridge_unipro_latency_max_aggregate(t);
+               r->apbridge_unipro_latency_avg =
+                       get_apbridge_unipro_latency_avg_aggregate(t);
+
+               r->gbphy_firmware_latency_min =
+                       get_gbphy_firmware_latency_min_aggregate(t);
+               r->gbphy_firmware_latency_max =
+                       get_gbphy_firmware_latency_max_aggregate(t);
+               r->gbphy_firmware_latency_avg =
+                       get_gbphy_firmware_latency_avg_aggregate(t);
+
+               r->request_jitter = r->request_max - r->request_min;
+               r->latency_jitter = r->latency_max - r->latency_min;
+               r->throughput_jitter = r->throughput_max - r->throughput_min;
+               r->apbridge_unipro_latency_jitter =
+                       r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
+               r->gbphy_firmware_latency_jitter =
+                       r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;
+
+       }
+
+       return 0;
+}
+
+void log_csv_error(int len, int err)
+{
+       fprintf(stderr, "unable to write %d bytes to csv %s\n", len,
+               strerror(err));
+}
+
+int format_output(struct loopback_test *t,
+                       struct loopback_results *r,
+                       const char *dev_name,
+                       char *buf, int buf_len,
+                       struct tm *tm)
+{
+       int len = 0;
+
+       memset(buf, 0x00, buf_len);
+       len = snprintf(buf, buf_len, "%u-%u-%u %u:%u:%u",
+                      tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+                      tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+       if (t->porcelain) {
+               len += snprintf(&buf[len], buf_len - len,
+                       "\n test:\t\t\t%s\n path:\t\t\t%s\n size:\t\t\t%u\n iterations:\t\t%u\n errors:\t\t%u\n async:\t\t\t%s\n",
+                       t->test_name,
+                       dev_name,
+                       t->size,
+                       t->iteration_max,
+                       r->error,
+                       t->use_async ? "Enabled" : "Disabled");
+
+               len += snprintf(&buf[len], buf_len - len,
+                       " requests per-sec:\tmin=%u, max=%u, average=%f, jitter=%u\n",
+                       r->request_min,
+                       r->request_max,
+                       r->request_avg,
+                       r->request_jitter);
+
+               len += snprintf(&buf[len], buf_len - len,
+                       " ap-throughput B/s:\tmin=%u max=%u average=%f jitter=%u\n",
+                       r->throughput_min,
+                       r->throughput_max,
+                       r->throughput_avg,
+                       r->throughput_jitter);
+               len += snprintf(&buf[len], buf_len - len,
+                       " ap-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+                       r->latency_min,
+                       r->latency_max,
+                       r->latency_avg,
+                       r->latency_jitter);
+               len += snprintf(&buf[len], buf_len - len,
+                       " apbridge-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+                       r->apbridge_unipro_latency_min,
+                       r->apbridge_unipro_latency_max,
+                       r->apbridge_unipro_latency_avg,
+                       r->apbridge_unipro_latency_jitter);
+
+               len += snprintf(&buf[len], buf_len - len,
+                       " gbphy-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+                       r->gbphy_firmware_latency_min,
+                       r->gbphy_firmware_latency_max,
+                       r->gbphy_firmware_latency_avg,
+                       r->gbphy_firmware_latency_jitter);
+
+       } else {
+               len += snprintf(&buf[len], buf_len - len, ",%s,%s,%u,%u,%u",
+                       t->test_name, dev_name, t->size, t->iteration_max,
+                       r->error);
+
+               len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+                       r->request_min,
+                       r->request_max,
+                       r->request_avg,
+                       r->request_jitter);
+
+               len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+                       r->latency_min,
+                       r->latency_max,
+                       r->latency_avg,
+                       r->latency_jitter);
+
+               len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+                       r->throughput_min,
+                       r->throughput_max,
+                       r->throughput_avg,
+                       r->throughput_jitter);
+
+               len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+                       r->apbridge_unipro_latency_min,
+                       r->apbridge_unipro_latency_max,
+                       r->apbridge_unipro_latency_avg,
+                       r->apbridge_unipro_latency_jitter);
+
+               len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+                       r->gbphy_firmware_latency_min,
+                       r->gbphy_firmware_latency_max,
+                       r->gbphy_firmware_latency_avg,
+                       r->gbphy_firmware_latency_jitter);
+       }
+
+       printf("\n%s\n", buf);
+
+       return len;
+}
+
+static int log_results(struct loopback_test *t)
+{
+       int fd, i, len, ret;
+       struct tm tm;
+       time_t local_time;
+       mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+       char file_name[MAX_SYSFS_PATH];
+       char data[CSV_MAX_LINE];
+
+       local_time = time(NULL);
+       tm = *localtime(&local_time);
+
+       /*
+       * file name will be test_name_size_iteration_max.csv
+       * every time the same test with the same parameters is run we will then
+       * append to the same CSV with datestamp - representing each test
+       * dataset.
+       */
+       if (t->file_output && !t->porcelain) {
+               snprintf(file_name, sizeof(file_name), "%s_%d_%d.csv",
+                       t->test_name, t->size, t->iteration_max);
+
+               fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, mode);
+               if (fd < 0) {
+                       fprintf(stderr, "unable to open %s for appendation\n", file_name);
+                       abort();
+               }
+
+       }
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               len = format_output(t, &t->devices[i].results,
+                                       t->devices[i].name,
+                                       data, sizeof(data), &tm);
+               if (t->file_output && !t->porcelain) {
+                       ret = write(fd, data, len);
+                       if (ret == -1)
+                               fprintf(stderr, "unable to write %d bytes to csv.\n", len);
+               }
+
+       }
+
+
+       if (t->aggregate_output) {
+               len = format_output(t, &t->aggregate_results, "aggregate",
+                                       data, sizeof(data), &tm);
+               if (t->file_output && !t->porcelain) {
+                       ret = write(fd, data, len);
+                       if (ret == -1)
+                               fprintf(stderr, "unable to write %d bytes to csv.\n", len);
+               }
+       }
+
+       if (t->file_output && !t->porcelain)
+               close(fd);
+
+       return 0;
+}
+
+int is_loopback_device(const char *path, const char *node)
+{
+       char file[MAX_SYSFS_PATH];
+
+       snprintf(file, MAX_SYSFS_PATH, "%s%s/iteration_count", path, node);
+       if (access(file, F_OK) == 0)
+               return 1;
+       return 0;
+}
+
+int find_loopback_devices(struct loopback_test *t)
+{
+       struct dirent **namelist;
+       int i, n, ret;
+       unsigned int dev_id;
+       struct loopback_device *d;
+
+       n = scandir(t->sysfs_prefix, &namelist, NULL, alphasort);
+       if (n < 0) {
+               perror("scandir");
+               ret = -ENODEV;
+               goto baddir;
+       }
+
+       /* Don't include '.' and '..' */
+       if (n <= 2) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       for (i = 0; i < n; i++) {
+               ret = sscanf(namelist[i]->d_name, "gb_loopback%u", &dev_id);
+               if (ret != 1)
+                       continue;
+
+               if (!is_loopback_device(t->sysfs_prefix, namelist[i]->d_name))
+                       continue;
+
+               if (t->device_count == MAX_NUM_DEVICES) {
+                       fprintf(stderr, "max number of devices reached!\n");
+                       break;
+               }
+
+               d = &t->devices[t->device_count++];
+               snprintf(d->name, MAX_STR_LEN, "gb_loopback%u", dev_id);
+
+               snprintf(d->sysfs_entry, MAX_SYSFS_PATH, "%s%s/",
+                       t->sysfs_prefix, d->name);
+
+               snprintf(d->debugfs_entry, MAX_SYSFS_PATH, "%sraw_latency_%s",
+                       t->debugfs_prefix, d->name);
+
+               if (t->debug)
+                       printf("add %s %s\n", d->sysfs_entry,
+                               d->debugfs_entry);
+       }
+
+       ret = 0;
+done:
+       for (i = 0; i < n; i++)
+               free(namelist[i]);
+       free(namelist);
+baddir:
+       return ret;
+}
+
+static int open_poll_files(struct loopback_test *t)
+{
+       struct loopback_device *dev;
+       char buf[MAX_STR_LEN];
+       char dummy;
+       int fds_idx = 0;
+       int i;
+
+       for (i = 0; i < t->device_count; i++) {
+               dev = &t->devices[i];
+
+               if (!device_enabled(t, i))
+                       continue;
+
+               snprintf(buf, sizeof(buf), "%s%s", dev->sysfs_entry, "iteration_count");
+               t->fds[fds_idx].fd = open(buf, O_RDONLY);
+               if (t->fds[fds_idx].fd < 0) {
+                       fprintf(stderr, "Error opening poll file!\n");
+                       goto err;
+               }
+               read(t->fds[fds_idx].fd, &dummy, 1);
+               t->fds[fds_idx].events = POLLERR|POLLPRI;
+               t->fds[fds_idx].revents = 0;
+               fds_idx++;
+       }
+
+       t->poll_count = fds_idx;
+
+       return 0;
+
+err:
+       for (i = 0; i < fds_idx; i++)
+               close(t->fds[i].fd);
+
+       return -1;
+}
+
+static int close_poll_files(struct loopback_test *t)
+{
+       int i;
+       for (i = 0; i < t->poll_count; i++)
+               close(t->fds[i].fd);
+
+       return 0;
+}
+
+static int is_complete(struct loopback_test *t)
+{
+       int iteration_count;
+       int i;
+
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               iteration_count = read_sysfs_int(t->devices[i].sysfs_entry,
+                                                "iteration_count");
+
+               /* at least one device did not finish yet */
+               if (iteration_count != t->iteration_max)
+                       return 0;
+       }
+
+       return 1;
+}
+
+static void stop_tests(struct loopback_test *t)
+{
+       int i;
+
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+               write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
+       }
+}
+
+static void handler(int sig) { /* do nothing */  }
+
+static int wait_for_complete(struct loopback_test *t)
+{
+       int number_of_events = 0;
+       char dummy;
+       int ret;
+       int i;
+       struct timespec *ts = NULL;
+       struct sigaction sa;
+       sigset_t mask_old, mask;
+
+       sigemptyset(&mask);
+       sigemptyset(&mask_old);
+       sigaddset(&mask, SIGINT);
+       sigprocmask(SIG_BLOCK, &mask, &mask_old);
+
+       sa.sa_handler = handler;
+       sa.sa_flags = 0;
+       sigemptyset(&sa.sa_mask);
+       if (sigaction(SIGINT, &sa, NULL) == -1) {
+               fprintf(stderr, "sigaction error\n");
+               return -1;
+       }
+
+       if (t->poll_timeout.tv_sec != 0)
+               ts = &t->poll_timeout;
+
+       while (1) {
+
+               ret = ppoll(t->fds, t->poll_count, ts, &mask_old);
+               if (ret <= 0) {
+                       stop_tests(t);
+                       fprintf(stderr, "Poll exit with errno %d\n", errno);
+                       return -1;
+               }
+
+               for (i = 0; i < t->poll_count; i++) {
+                       if (t->fds[i].revents & POLLPRI) {
+                               /* Dummy read to clear the event */
+                               read(t->fds[i].fd, &dummy, 1);
+                               number_of_events++;
+                       }
+               }
+
+               if (number_of_events == t->poll_count)
+                       break;
+       }
+
+       if (!is_complete(t)) {
+               fprintf(stderr, "Iteration count did not finish!\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static void prepare_devices(struct loopback_test *t)
+{
+       int i;
+
+       /* Cancel any running tests on enabled devices. If
+        * stop_all option is given, stop test on all devices.
+        */
+       for (i = 0; i < t->device_count; i++)
+               if (t->stop_all || device_enabled(t, i))
+                       write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
+
+
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               write_sysfs_val(t->devices[i].sysfs_entry, "us_wait",
+                               t->us_wait);
+
+               /* Set operation size */
+               write_sysfs_val(t->devices[i].sysfs_entry, "size", t->size);
+
+               /* Set iterations */
+               write_sysfs_val(t->devices[i].sysfs_entry, "iteration_max",
+                               t->iteration_max);
+
+               if (t->use_async) {
+                       write_sysfs_val(t->devices[i].sysfs_entry,
+                               "async", 1);
+                       write_sysfs_val(t->devices[i].sysfs_entry,
+                               "timeout", t->async_timeout);
+                       write_sysfs_val(t->devices[i].sysfs_entry,
+                               "outstanding_operations_max",
+                               t->async_outstanding_operations);
+               } else
+                       write_sysfs_val(t->devices[i].sysfs_entry,
+                               "async", 0);
+       }
+}
+
+static int start(struct loopback_test *t)
+{
+       int i;
+
+       /* the test starts by writing test_id to the type file. */
+       for (i = 0; i < t->device_count; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               write_sysfs_val(t->devices[i].sysfs_entry, "type", t->test_id);
+       }
+
+       return 0;
+}
+
+
+void loopback_run(struct loopback_test *t)
+{
+       int i;
+       int ret;
+
+       for (i = 0; dict[i].name != NULL; i++) {
+               if (strstr(dict[i].name, t->test_name))
+                       t->test_id = dict[i].type;
+       }
+       if (!t->test_id) {
+               fprintf(stderr, "invalid test %s\n", t->test_name);
+               usage();
+               return;
+       }
+
+       prepare_devices(t);
+
+       ret = open_poll_files(t);
+       if (ret)
+               goto err;
+
+       start(t);
+
+       ret = wait_for_complete(t);
+       close_poll_files(t);
+       if (ret)
+               goto err;
+
+
+       get_results(t);
+
+       log_results(t);
+
+       return;
+
+err:
+       printf("Error running test\n");
+       return;
+}
+
+static int sanity_check(struct loopback_test *t)
+{
+       int i;
+
+       if (t->device_count == 0) {
+               fprintf(stderr, "No loopback devices found\n");
+               return -1;
+       }
+
+       for (i = 0; i < MAX_NUM_DEVICES; i++) {
+               if (!device_enabled(t, i))
+                       continue;
+
+               if (t->mask && !strcmp(t->devices[i].name, "")) {
+                       fprintf(stderr, "Bad device mask %x\n", (1 << i));
+                       return -1;
+               }
+
+       }
+
+
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int o, ret;
+       char *sysfs_prefix = "/sys/class/gb_loopback/";
+       char *debugfs_prefix = "/sys/kernel/debug/gb_loopback/";
+
+       memset(&t, 0, sizeof(t));
+
+       while ((o = getopt(argc, argv,
+                          "t:s:i:S:D:m:v::d::r::p::a::l::x::o:O:c:w:z::f::")) != -1) {
+               switch (o) {
+               case 't':
+                       snprintf(t.test_name, MAX_STR_LEN, "%s", optarg);
+                       break;
+               case 's':
+                       t.size = atoi(optarg);
+                       break;
+               case 'i':
+                       t.iteration_max = atoi(optarg);
+                       break;
+               case 'S':
+                       snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       break;
+               case 'D':
+                       snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       break;
+               case 'm':
+                       t.mask = atol(optarg);
+                       break;
+               case 'v':
+                       t.verbose = 1;
+                       break;
+               case 'd':
+                       t.debug = 1;
+                       break;
+               case 'r':
+                       t.raw_data_dump = 1;
+                       break;
+               case 'p':
+                       t.porcelain = 1;
+                       break;
+               case 'a':
+                       t.aggregate_output = 1;
+                       break;
+               case 'l':
+                       t.list_devices = 1;
+                       break;
+               case 'x':
+                       t.use_async = 1;
+                       break;
+               case 'o':
+                       t.async_timeout = atoi(optarg);
+                       break;
+               case 'O':
+                       t.poll_timeout.tv_sec = atoi(optarg);
+                       break;
+               case 'c':
+                       t.async_outstanding_operations = atoi(optarg);
+                       break;
+               case 'w':
+                       t.us_wait = atoi(optarg);
+                       break;
+               case 'z':
+                       t.file_output = 1;
+                       break;
+               case 'f':
+                       t.stop_all = 1;
+                       break;
+               default:
+                       usage();
+                       return -EINVAL;
+               }
+       }
+
+       if (!strcmp(t.sysfs_prefix, ""))
+               snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
+
+       if (!strcmp(t.debugfs_prefix, ""))
+               snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
+
+       ret = find_loopback_devices(&t);
+       if (ret)
+               return ret;
+       ret = sanity_check(&t);
+       if (ret)
+               return ret;
+
+       if (t.list_devices) {
+               show_loopback_devices(&t);
+               return 0;
+       }
+
+       if (t.test_name[0] == '\0' || t.iteration_max == 0)
+               usage();
+
+       if (t.async_timeout == 0)
+               t.async_timeout = DEFAULT_ASYNC_TIMEOUT;
+
+       loopback_run(&t);
+
+       return 0;
+}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
new file mode 100644 (file)
index 0000000..01aeed1
--- /dev/null
@@ -0,0 +1,1075 @@
+/*
+ * UART driver for the Greybus "generic" UART module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ * Heavily based on drivers/usb/class/cdc-acm.c and
+ * drivers/usb/serial/usb-serial.c.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+#define GB_NUM_MINORS  16      /* 16 is more than enough */
+#define GB_NAME                "ttyGB"
+
+#define GB_UART_WRITE_FIFO_SIZE                PAGE_SIZE
+#define GB_UART_WRITE_ROOM_MARGIN      1       /* leave some space in fifo */
+#define GB_UART_FIRMWARE_CREDITS       4096
+#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC       10000
+
+struct gb_tty_line_coding {
+       __le32  rate;
+       __u8    format;
+       __u8    parity;
+       __u8    data_bits;
+       __u8    flow_control;
+};
+
+struct gb_tty {
+       struct gbphy_device *gbphy_dev;
+       struct tty_port port;
+       void *buffer;
+       size_t buffer_payload_max;
+       struct gb_connection *connection;
+       u16 cport_id;
+       unsigned int minor;
+       unsigned char clocal;
+       bool disconnected;
+       spinlock_t read_lock;
+       spinlock_t write_lock;
+       struct async_icount iocount;
+       struct async_icount oldcount;
+       wait_queue_head_t wioctl;
+       struct mutex mutex;
+       u8 ctrlin;      /* input control lines */
+       u8 ctrlout;     /* output control lines */
+       struct gb_tty_line_coding line_coding;
+       struct work_struct tx_work;
+       struct kfifo write_fifo;
+       bool close_pending;
+       unsigned int credits;
+       struct completion credits_complete;
+};
+
+static struct tty_driver *gb_tty_driver;
+static DEFINE_IDR(tty_minors);
+static DEFINE_MUTEX(table_lock);
+
+static int gb_uart_receive_data_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_tty *gb_tty = gb_connection_get_data(connection);
+       struct tty_port *port = &gb_tty->port;
+       struct gb_message *request = op->request;
+       struct gb_uart_recv_data_request *receive_data;
+       u16 recv_data_size;
+       int count;
+       unsigned long tty_flags = TTY_NORMAL;
+
+       if (request->payload_size < sizeof(*receive_data)) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                               "short receive-data request received (%zu < %zu)\n",
+                               request->payload_size, sizeof(*receive_data));
+               return -EINVAL;
+       }
+
+       receive_data = op->request->payload;
+       recv_data_size = le16_to_cpu(receive_data->size);
+
+       if (recv_data_size != request->payload_size - sizeof(*receive_data)) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                               "malformed receive-data request received (%u != %zu)\n",
+                               recv_data_size,
+                               request->payload_size - sizeof(*receive_data));
+               return -EINVAL;
+       }
+
+       if (!recv_data_size)
+               return -EINVAL;
+
+       if (receive_data->flags) {
+               if (receive_data->flags & GB_UART_RECV_FLAG_BREAK)
+                       tty_flags = TTY_BREAK;
+               else if (receive_data->flags & GB_UART_RECV_FLAG_PARITY)
+                       tty_flags = TTY_PARITY;
+               else if (receive_data->flags & GB_UART_RECV_FLAG_FRAMING)
+                       tty_flags = TTY_FRAME;
+
+               /* overrun is special, not associated with a char */
+               if (receive_data->flags & GB_UART_RECV_FLAG_OVERRUN)
+                       tty_insert_flip_char(port, 0, TTY_OVERRUN);
+       }
+       count = tty_insert_flip_string_fixed_flag(port, receive_data->data,
+                                                 tty_flags, recv_data_size);
+       if (count != recv_data_size) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "UART: RX 0x%08x bytes only wrote 0x%08x\n",
+                       recv_data_size, count);
+       }
+       if (count)
+               tty_flip_buffer_push(port);
+       return 0;
+}
+
+static int gb_uart_serial_state_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_tty *gb_tty = gb_connection_get_data(connection);
+       struct gb_message *request = op->request;
+       struct gb_uart_serial_state_request *serial_state;
+
+       if (request->payload_size < sizeof(*serial_state)) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                               "short serial-state event received (%zu < %zu)\n",
+                               request->payload_size, sizeof(*serial_state));
+               return -EINVAL;
+       }
+
+       serial_state = request->payload;
+       gb_tty->ctrlin = serial_state->control;
+
+       return 0;
+}
+
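+/*
+ * Credit-based flow control: the module returns send credits to the AP in
+ * receive-credits requests. gb_uart_tx_write_work() consumes one credit
+ * per payload byte sent, and the total held here never exceeds
+ * GB_UART_FIRMWARE_CREDITS.
+ */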
+static int gb_uart_receive_credits_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_tty *gb_tty = gb_connection_get_data(connection);
+       struct gb_message *request = op->request;
+       struct gb_uart_receive_credits_request *credit_request;
+       unsigned long flags;
+       unsigned int incoming_credits;
+       int ret = 0;
+
+       if (request->payload_size < sizeof(*credit_request)) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                               "short receive_credits event received (%zu < %zu)\n",
+                               request->payload_size,
+                               sizeof(*credit_request));
+               return -EINVAL;
+       }
+
+       credit_request = request->payload;
+       incoming_credits = le16_to_cpu(credit_request->count);
+
+       spin_lock_irqsave(&gb_tty->write_lock, flags);
+       gb_tty->credits += incoming_credits;
+       if (gb_tty->credits > GB_UART_FIRMWARE_CREDITS) {
+               gb_tty->credits -= incoming_credits;
+               ret = -EINVAL;
+       }
+       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+       if (ret) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "invalid number of incoming credits: %d\n",
+                       incoming_credits);
+               return ret;
+       }
+
+       if (!gb_tty->close_pending)
+               schedule_work(&gb_tty->tx_work);
+
+       /*
+        * Wake up the port; the tty layer may be waiting for credits.
+        */
+       tty_port_tty_wakeup(&gb_tty->port);
+
+       if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+               complete(&gb_tty->credits_complete);
+
+       return ret;
+}
+
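+/*
+ * Dispatch unsolicited (module-initiated) UART requests to the handlers
+ * above.
+ */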
+static int gb_uart_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_tty *gb_tty = gb_connection_get_data(connection);
+       int type = op->type;
+       int ret;
+
+       switch (type) {
+       case GB_UART_TYPE_RECEIVE_DATA:
+               ret = gb_uart_receive_data_handler(op);
+               break;
+       case GB_UART_TYPE_SERIAL_STATE:
+               ret = gb_uart_serial_state_handler(op);
+               break;
+       case GB_UART_TYPE_RECEIVE_CREDITS:
+               ret = gb_uart_receive_credits_handler(op);
+               break;
+       default:
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "unsupported unsolicited request: 0x%02x\n", type);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
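+/*
+ * Transmit worker: drains the write fifo in chunks bounded by both the
+ * request buffer size and the credits currently available.  Data is only
+ * peeked before the send and removed from the fifo once the send-data
+ * operation has succeeded, so a failed send can be retried later.
+ */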
+static void gb_uart_tx_write_work(struct work_struct *work)
+{
+       struct gb_uart_send_data_request *request;
+       struct gb_tty *gb_tty;
+       unsigned long flags;
+       unsigned int send_size;
+       int ret;
+
+       gb_tty = container_of(work, struct gb_tty, tx_work);
+       request = gb_tty->buffer;
+
+       while (1) {
+               if (gb_tty->close_pending)
+                       break;
+
+               spin_lock_irqsave(&gb_tty->write_lock, flags);
+               send_size = gb_tty->buffer_payload_max;
+               if (send_size > gb_tty->credits)
+                       send_size = gb_tty->credits;
+
+               send_size = kfifo_out_peek(&gb_tty->write_fifo,
+                                       &request->data[0],
+                                       send_size);
+               if (!send_size) {
+                       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+                       break;
+               }
+
+               gb_tty->credits -= send_size;
+               spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+               request->size = cpu_to_le16(send_size);
+               ret = gb_operation_sync(gb_tty->connection,
+                                       GB_UART_TYPE_SEND_DATA,
+                                       request, sizeof(*request) + send_size,
+                                       NULL, 0);
+               if (ret) {
+                       dev_err(&gb_tty->gbphy_dev->dev,
+                               "send data error: %d\n", ret);
+                       spin_lock_irqsave(&gb_tty->write_lock, flags);
+                       gb_tty->credits += send_size;
+                       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+                       if (!gb_tty->close_pending)
+                               schedule_work(work);
+                       return;
+               }
+
+               spin_lock_irqsave(&gb_tty->write_lock, flags);
+               ret = kfifo_out(&gb_tty->write_fifo, &request->data[0],
+                               send_size);
+               spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+               tty_port_tty_wakeup(&gb_tty->port);
+       }
+}
+
+static int send_line_coding(struct gb_tty *tty)
+{
+       struct gb_uart_set_line_coding_request request;
+
+       memcpy(&request, &tty->line_coding,
+              sizeof(tty->line_coding));
+       return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
+                                &request, sizeof(request), NULL, 0);
+}
+
+static int send_control(struct gb_tty *gb_tty, u8 control)
+{
+       struct gb_uart_set_control_line_state_request request;
+
+       request.control = control;
+       return gb_operation_sync(gb_tty->connection,
+                                GB_UART_TYPE_SET_CONTROL_LINE_STATE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+static int send_break(struct gb_tty *gb_tty, u8 state)
+{
+       struct gb_uart_set_break_request request;
+
+       if ((state != 0) && (state != 1)) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "invalid break state of %d\n", state);
+               return -EINVAL;
+       }
+
+       request.state = state;
+       return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_SEND_BREAK,
+                                &request, sizeof(request), NULL, 0);
+}
+
+static int gb_uart_wait_for_all_credits(struct gb_tty *gb_tty)
+{
+       int ret;
+
+       if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+               return 0;
+
+       ret = wait_for_completion_timeout(&gb_tty->credits_complete,
+                       msecs_to_jiffies(GB_UART_CREDIT_WAIT_TIMEOUT_MSEC));
+       if (!ret) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "timed out waiting for credits\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int gb_uart_flush(struct gb_tty *gb_tty, u8 flags)
+{
+       struct gb_uart_serial_flush_request request;
+
+       request.flags = flags;
+       return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_FLUSH_FIFOS,
+                                &request, sizeof(request), NULL, 0);
+}
+
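+/*
+ * Minor-number bookkeeping: gb_tty devices are tracked in the tty_minors
+ * idr under table_lock, and lookups take a reference on the tty port of
+ * devices that have not been disconnected.
+ */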
+static struct gb_tty *get_gb_by_minor(unsigned minor)
+{
+       struct gb_tty *gb_tty;
+
+       mutex_lock(&table_lock);
+       gb_tty = idr_find(&tty_minors, minor);
+       if (gb_tty) {
+               mutex_lock(&gb_tty->mutex);
+               if (gb_tty->disconnected) {
+                       mutex_unlock(&gb_tty->mutex);
+                       gb_tty = NULL;
+               } else {
+                       tty_port_get(&gb_tty->port);
+                       mutex_unlock(&gb_tty->mutex);
+               }
+       }
+       mutex_unlock(&table_lock);
+       return gb_tty;
+}
+
+static int alloc_minor(struct gb_tty *gb_tty)
+{
+       int minor;
+
+       mutex_lock(&table_lock);
+       minor = idr_alloc(&tty_minors, gb_tty, 0, GB_NUM_MINORS, GFP_KERNEL);
+       mutex_unlock(&table_lock);
+       if (minor >= 0)
+               gb_tty->minor = minor;
+       return minor;
+}
+
+static void release_minor(struct gb_tty *gb_tty)
+{
+       int minor = gb_tty->minor;
+
+       gb_tty->minor = 0;      /* Maybe should use an invalid value instead */
+       mutex_lock(&table_lock);
+       idr_remove(&tty_minors, minor);
+       mutex_unlock(&table_lock);
+}
+
+static int gb_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty;
+       int retval;
+
+       gb_tty = get_gb_by_minor(tty->index);
+       if (!gb_tty)
+               return -ENODEV;
+
+       retval = tty_standard_install(driver, tty);
+       if (retval)
+               goto error;
+
+       tty->driver_data = gb_tty;
+       return 0;
+error:
+       tty_port_put(&gb_tty->port);
+       return retval;
+}
+
+static int gb_tty_open(struct tty_struct *tty, struct file *file)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       return tty_port_open(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_close(struct tty_struct *tty, struct file *file)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       tty_port_close(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_cleanup(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       tty_port_put(&gb_tty->port);
+}
+
+static void gb_tty_hangup(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       tty_port_hangup(&gb_tty->port);
+}
+
+static int gb_tty_write(struct tty_struct *tty, const unsigned char *buf,
+                       int count)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       count = kfifo_in_spinlocked(&gb_tty->write_fifo, buf, count,
+                                       &gb_tty->write_lock);
+       if (count && !gb_tty->close_pending)
+               schedule_work(&gb_tty->tx_work);
+
+       return count;
+}
+
+static int gb_tty_write_room(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       unsigned long flags;
+       int room;
+
+       spin_lock_irqsave(&gb_tty->write_lock, flags);
+       room = kfifo_avail(&gb_tty->write_fifo);
+       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+       room -= GB_UART_WRITE_ROOM_MARGIN;
+       if (room < 0)
+               return 0;
+
+       return room;
+}
+
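+/*
+ * Report both the data still queued locally and the bytes already sent but
+ * not yet acknowledged by returned credits.
+ */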
+static int gb_tty_chars_in_buffer(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       unsigned long flags;
+       int chars;
+
+       spin_lock_irqsave(&gb_tty->write_lock, flags);
+       chars = kfifo_len(&gb_tty->write_fifo);
+       if (gb_tty->credits < GB_UART_FIRMWARE_CREDITS)
+               chars += GB_UART_FIRMWARE_CREDITS - gb_tty->credits;
+       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+       return chars;
+}
+
+static int gb_tty_break_ctl(struct tty_struct *tty, int state)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       return send_break(gb_tty, state ? 1 : 0);
+}
+
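+/*
+ * Translate the termios settings into Greybus line-coding and control-line
+ * requests, issuing operations only when something actually changed.
+ */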
+static void gb_tty_set_termios(struct tty_struct *tty,
+                              struct ktermios *termios_old)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       struct ktermios *termios = &tty->termios;
+       struct gb_tty_line_coding newline;
+       u8 newctrl = gb_tty->ctrlout;
+
+       newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
+       newline.format = termios->c_cflag & CSTOPB ?
+                               GB_SERIAL_2_STOP_BITS : GB_SERIAL_1_STOP_BITS;
+       newline.parity = termios->c_cflag & PARENB ?
+                               (termios->c_cflag & PARODD ? 1 : 2) +
+                               (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+
+       switch (termios->c_cflag & CSIZE) {
+       case CS5:
+               newline.data_bits = 5;
+               break;
+       case CS6:
+               newline.data_bits = 6;
+               break;
+       case CS7:
+               newline.data_bits = 7;
+               break;
+       case CS8:
+       default:
+               newline.data_bits = 8;
+               break;
+       }
+
+       /* FIXME: needs to clear unsupported bits in the termios */
+       gb_tty->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+       if (C_BAUD(tty) == B0) {
+               newline.rate = gb_tty->line_coding.rate;
+               newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+       } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+               newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+       }
+
+       if (newctrl != gb_tty->ctrlout) {
+               gb_tty->ctrlout = newctrl;
+               send_control(gb_tty, newctrl);
+       }
+
+       if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
+               newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
+       else
+               newline.flow_control = 0;
+
+       if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
+               memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
+               send_line_coding(gb_tty);
+       }
+}
+
+static int gb_tty_tiocmget(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       return (gb_tty->ctrlout & GB_UART_CTRL_DTR ? TIOCM_DTR : 0) |
+              (gb_tty->ctrlout & GB_UART_CTRL_RTS ? TIOCM_RTS : 0) |
+              (gb_tty->ctrlin  & GB_UART_CTRL_DSR ? TIOCM_DSR : 0) |
+              (gb_tty->ctrlin  & GB_UART_CTRL_RI  ? TIOCM_RI  : 0) |
+              (gb_tty->ctrlin  & GB_UART_CTRL_DCD ? TIOCM_CD  : 0) |
+              TIOCM_CTS;
+}
+
+static int gb_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+                          unsigned int clear)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       u8 newctrl = gb_tty->ctrlout;
+
+       set = (set & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+             (set & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+       clear = (clear & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+               (clear & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+
+       newctrl = (newctrl & ~clear) | set;
+       if (gb_tty->ctrlout == newctrl)
+               return 0;
+
+       gb_tty->ctrlout = newctrl;
+       return send_control(gb_tty, newctrl);
+}
+
+static void gb_tty_throttle(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       unsigned char stop_char;
+       int retval;
+
+       if (I_IXOFF(tty)) {
+               stop_char = STOP_CHAR(tty);
+               retval = gb_tty_write(tty, &stop_char, 1);
+               if (retval <= 0)
+                       return;
+       }
+
+       if (tty->termios.c_cflag & CRTSCTS) {
+               gb_tty->ctrlout &= ~GB_UART_CTRL_RTS;
+               retval = send_control(gb_tty, gb_tty->ctrlout);
+       }
+}
+
+static void gb_tty_unthrottle(struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+       unsigned char start_char;
+       int retval;
+
+       if (I_IXOFF(tty)) {
+               start_char = START_CHAR(tty);
+               retval = gb_tty_write(tty, &start_char, 1);
+               if (retval <= 0)
+                       return;
+       }
+
+       if (tty->termios.c_cflag & CRTSCTS) {
+               gb_tty->ctrlout |= GB_UART_CTRL_RTS;
+               retval = send_control(gb_tty, gb_tty->ctrlout);
+       }
+}
+
+static int get_serial_info(struct gb_tty *gb_tty,
+                          struct serial_struct __user *info)
+{
+       struct serial_struct tmp;
+
+       if (!info)
+               return -EINVAL;
+
+       memset(&tmp, 0, sizeof(tmp));
+       tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
+       tmp.type = PORT_16550A;
+       tmp.line = gb_tty->minor;
+       tmp.xmit_fifo_size = 16;
+       tmp.baud_base = 9600;
+       tmp.close_delay = gb_tty->port.close_delay / 10;
+       tmp.closing_wait = gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+                               ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
+
+       if (copy_to_user(info, &tmp, sizeof(tmp)))
+               return -EFAULT;
+       return 0;
+}
+
+static int set_serial_info(struct gb_tty *gb_tty,
+                          struct serial_struct __user *newinfo)
+{
+       struct serial_struct new_serial;
+       unsigned int closing_wait;
+       unsigned int close_delay;
+       int retval = 0;
+
+       if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
+               return -EFAULT;
+
+       close_delay = new_serial.close_delay * 10;
+       closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+                       ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+
+       mutex_lock(&gb_tty->port.mutex);
+       if (!capable(CAP_SYS_ADMIN)) {
+               if ((close_delay != gb_tty->port.close_delay) ||
+                   (closing_wait != gb_tty->port.closing_wait))
+                       retval = -EPERM;
+               else
+                       retval = -EOPNOTSUPP;
+       } else {
+               gb_tty->port.close_delay = close_delay;
+               gb_tty->port.closing_wait = closing_wait;
+       }
+       mutex_unlock(&gb_tty->port.mutex);
+       return retval;
+}
+
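+/*
+ * Implements TIOCMIWAIT: sleep until one of the requested modem-status
+ * counters (DSR, CD, RI) changes, a signal arrives, or the device is
+ * disconnected.
+ */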
+static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
+{
+       int retval = 0;
+       DECLARE_WAITQUEUE(wait, current);
+       struct async_icount old;
+       struct async_icount new;
+
+       if (!(arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)))
+               return -EINVAL;
+
+       do {
+               spin_lock_irq(&gb_tty->read_lock);
+               old = gb_tty->oldcount;
+               new = gb_tty->iocount;
+               gb_tty->oldcount = new;
+               spin_unlock_irq(&gb_tty->read_lock);
+
+               if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
+                       break;
+               if ((arg & TIOCM_CD) && (old.dcd != new.dcd))
+                       break;
+               if ((arg & TIOCM_RI) && (old.rng != new.rng))
+                       break;
+
+               add_wait_queue(&gb_tty->wioctl, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+               remove_wait_queue(&gb_tty->wioctl, &wait);
+               if (gb_tty->disconnected) {
+                       if (arg & TIOCM_CD)
+                               break;
+                       retval = -ENODEV;
+               } else if (signal_pending(current)) {
+                       retval = -ERESTARTSYS;
+               }
+       } while (!retval);
+
+       return retval;
+}
+
+static int get_serial_usage(struct gb_tty *gb_tty,
+                           struct serial_icounter_struct __user *count)
+{
+       struct serial_icounter_struct icount;
+       int retval = 0;
+
+       memset(&icount, 0, sizeof(icount));
+       icount.dsr = gb_tty->iocount.dsr;
+       icount.rng = gb_tty->iocount.rng;
+       icount.dcd = gb_tty->iocount.dcd;
+       icount.frame = gb_tty->iocount.frame;
+       icount.overrun = gb_tty->iocount.overrun;
+       icount.parity = gb_tty->iocount.parity;
+       icount.brk = gb_tty->iocount.brk;
+
+       if (copy_to_user(count, &icount, sizeof(icount)) > 0)
+               retval = -EFAULT;
+
+       return retval;
+}
+
+static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+                       unsigned long arg)
+{
+       struct gb_tty *gb_tty = tty->driver_data;
+
+       switch (cmd) {
+       case TIOCGSERIAL:
+               return get_serial_info(gb_tty,
+                                      (struct serial_struct __user *)arg);
+       case TIOCSSERIAL:
+               return set_serial_info(gb_tty,
+                                      (struct serial_struct __user *)arg);
+       case TIOCMIWAIT:
+               return wait_serial_change(gb_tty, arg);
+       case TIOCGICOUNT:
+               return get_serial_usage(gb_tty,
+                                       (struct serial_icounter_struct __user *)arg);
+       }
+
+       return -ENOIOCTLCMD;
+}
+
+static void gb_tty_dtr_rts(struct tty_port *port, int on)
+{
+       struct gb_tty *gb_tty;
+       u8 newctrl;
+
+       gb_tty = container_of(port, struct gb_tty, port);
+       newctrl = gb_tty->ctrlout;
+
+       if (on)
+               newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+       else
+               newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+
+       gb_tty->ctrlout = newctrl;
+       send_control(gb_tty, newctrl);
+}
+
+static int gb_tty_port_activate(struct tty_port *port,
+                               struct tty_struct *tty)
+{
+       struct gb_tty *gb_tty;
+
+       gb_tty = container_of(port, struct gb_tty, port);
+
+       return gbphy_runtime_get_sync(gb_tty->gbphy_dev);
+}
+
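+/*
+ * Final close: stop the transmit worker and drop any buffered data.  If
+ * the module still holds outstanding credits, ask it to flush its
+ * transmitter and wait for all credits to be returned before allowing
+ * runtime suspend.
+ */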
+static void gb_tty_port_shutdown(struct tty_port *port)
+{
+       struct gb_tty *gb_tty;
+       unsigned long flags;
+       int ret;
+
+       gb_tty = container_of(port, struct gb_tty, port);
+
+       gb_tty->close_pending = true;
+
+       cancel_work_sync(&gb_tty->tx_work);
+
+       spin_lock_irqsave(&gb_tty->write_lock, flags);
+       kfifo_reset_out(&gb_tty->write_fifo);
+       spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+       if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+               goto out;
+
+       ret = gb_uart_flush(gb_tty, GB_SERIAL_FLAG_FLUSH_TRANSMITTER);
+       if (ret) {
+               dev_err(&gb_tty->gbphy_dev->dev,
+                       "error flushing transmitter: %d\n", ret);
+       }
+
+       gb_uart_wait_for_all_credits(gb_tty);
+
+out:
+       gb_tty->close_pending = false;
+
+       gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
+}
+
+static const struct tty_operations gb_ops = {
+       .install =              gb_tty_install,
+       .open =                 gb_tty_open,
+       .close =                gb_tty_close,
+       .cleanup =              gb_tty_cleanup,
+       .hangup =               gb_tty_hangup,
+       .write =                gb_tty_write,
+       .write_room =           gb_tty_write_room,
+       .ioctl =                gb_tty_ioctl,
+       .throttle =             gb_tty_throttle,
+       .unthrottle =           gb_tty_unthrottle,
+       .chars_in_buffer =      gb_tty_chars_in_buffer,
+       .break_ctl =            gb_tty_break_ctl,
+       .set_termios =          gb_tty_set_termios,
+       .tiocmget =             gb_tty_tiocmget,
+       .tiocmset =             gb_tty_tiocmset,
+};
+
+static struct tty_port_operations gb_port_ops = {
+       .dtr_rts =              gb_tty_dtr_rts,
+       .activate =             gb_tty_port_activate,
+       .shutdown =             gb_tty_port_shutdown,
+};
+
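+/*
+ * Probe: create the connection with the unsolicited-request handler, size
+ * the transmit buffer from the maximum operation payload, set up the write
+ * fifo and tty minor, push the initial control-line state and 9600n81 line
+ * coding over the tx-enabled connection, then fully enable the connection
+ * and register the tty device.
+ */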
+static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+                        const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       size_t max_payload;
+       struct gb_tty *gb_tty;
+       struct device *tty_dev;
+       int retval;
+       int minor;
+
+       gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+       if (!gb_tty)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         gb_uart_request_handler);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto exit_tty_free;
+       }
+
+       max_payload = gb_operation_get_payload_size_max(connection);
+       if (max_payload < sizeof(struct gb_uart_send_data_request)) {
+               retval = -EINVAL;
+               goto exit_connection_destroy;
+       }
+
+       gb_tty->buffer_payload_max = max_payload -
+                       sizeof(struct gb_uart_send_data_request);
+
+       gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
+       if (!gb_tty->buffer) {
+               retval = -ENOMEM;
+               goto exit_connection_destroy;
+       }
+
+       INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
+
+       retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
+                               GFP_KERNEL);
+       if (retval)
+               goto exit_buf_free;
+
+       gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
+       init_completion(&gb_tty->credits_complete);
+
+       minor = alloc_minor(gb_tty);
+       if (minor < 0) {
+               if (minor == -ENOSPC) {
+                       dev_err(&connection->bundle->dev,
+                               "no more free minor numbers\n");
+                       retval = -ENODEV;
+               } else {
+                       retval = minor;
+               }
+               goto exit_kfifo_free;
+       }
+
+       gb_tty->minor = minor;
+       spin_lock_init(&gb_tty->write_lock);
+       spin_lock_init(&gb_tty->read_lock);
+       init_waitqueue_head(&gb_tty->wioctl);
+       mutex_init(&gb_tty->mutex);
+
+       tty_port_init(&gb_tty->port);
+       gb_tty->port.ops = &gb_port_ops;
+
+       gb_tty->connection = connection;
+       gb_tty->gbphy_dev = gbphy_dev;
+       gb_connection_set_data(connection, gb_tty);
+       gb_gbphy_set_data(gbphy_dev, gb_tty);
+
+       retval = gb_connection_enable_tx(connection);
+       if (retval)
+               goto exit_release_minor;
+
+       send_control(gb_tty, gb_tty->ctrlout);
+
+       /* initialize the uart to be 9600n81 */
+       gb_tty->line_coding.rate = cpu_to_le32(9600);
+       gb_tty->line_coding.format = GB_SERIAL_1_STOP_BITS;
+       gb_tty->line_coding.parity = GB_SERIAL_NO_PARITY;
+       gb_tty->line_coding.data_bits = 8;
+       send_line_coding(gb_tty);
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto exit_connection_disable;
+
+       tty_dev = tty_port_register_device(&gb_tty->port, gb_tty_driver, minor,
+                                          &gbphy_dev->dev);
+       if (IS_ERR(tty_dev)) {
+               retval = PTR_ERR(tty_dev);
+               goto exit_connection_disable;
+       }
+
+       gbphy_runtime_put_autosuspend(gbphy_dev);
+       return 0;
+
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_release_minor:
+       release_minor(gb_tty);
+exit_kfifo_free:
+       kfifo_free(&gb_tty->write_fifo);
+exit_buf_free:
+       kfree(gb_tty->buffer);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_tty_free:
+       kfree(gb_tty);
+
+       return retval;
+}
+
+static void gb_uart_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = gb_tty->connection;
+       struct tty_struct *tty;
+       int ret;
+
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+       if (ret)
+               gbphy_runtime_get_noresume(gbphy_dev);
+
+       mutex_lock(&gb_tty->mutex);
+       gb_tty->disconnected = true;
+
+       wake_up_all(&gb_tty->wioctl);
+       mutex_unlock(&gb_tty->mutex);
+
+       tty = tty_port_tty_get(&gb_tty->port);
+       if (tty) {
+               tty_vhangup(tty);
+               tty_kref_put(tty);
+       }
+
+       gb_connection_disable_rx(connection);
+       tty_unregister_device(gb_tty_driver, gb_tty->minor);
+
+       /* FIXME - free transmit / receive buffers */
+
+       gb_connection_disable(connection);
+       tty_port_destroy(&gb_tty->port);
+       gb_connection_destroy(connection);
+       release_minor(gb_tty);
+       kfifo_free(&gb_tty->write_fifo);
+       kfree(gb_tty->buffer);
+       kfree(gb_tty);
+}
+
+static int gb_tty_init(void)
+{
+       int retval = 0;
+
+       gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, 0);
+       if (IS_ERR(gb_tty_driver)) {
+               pr_err("Can not allocate tty driver\n");
+               retval = -ENOMEM;
+               goto fail_unregister_dev;
+       }
+
+       gb_tty_driver->driver_name = "gb";
+       gb_tty_driver->name = GB_NAME;
+       gb_tty_driver->major = 0;
+       gb_tty_driver->minor_start = 0;
+       gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+       gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+       gb_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+       gb_tty_driver->init_termios = tty_std_termios;
+       gb_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+       tty_set_operations(gb_tty_driver, &gb_ops);
+
+       retval = tty_register_driver(gb_tty_driver);
+       if (retval) {
+               pr_err("Can not register tty driver: %d\n", retval);
+               goto fail_put_gb_tty;
+       }
+
+       return 0;
+
+fail_put_gb_tty:
+       put_tty_driver(gb_tty_driver);
+fail_unregister_dev:
+       return retval;
+}
+
+static void gb_tty_exit(void)
+{
+       tty_unregister_driver(gb_tty_driver);
+       put_tty_driver(gb_tty_driver);
+       idr_destroy(&tty_minors);
+}
+
+static const struct gbphy_device_id gb_uart_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_UART) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_uart_id_table);
+
+static struct gbphy_driver uart_driver = {
+       .name           = "uart",
+       .probe          = gb_uart_probe,
+       .remove         = gb_uart_remove,
+       .id_table       = gb_uart_id_table,
+};
+
+static int gb_uart_driver_init(void)
+{
+       int ret;
+
+       ret = gb_tty_init();
+       if (ret)
+               return ret;
+
+       ret = gb_gbphy_register(&uart_driver);
+       if (ret) {
+               gb_tty_exit();
+               return ret;
+       }
+
+       return 0;
+}
+module_init(gb_uart_driver_init);
+
+static void gb_uart_driver_exit(void)
+{
+       gb_gbphy_deregister(&uart_driver);
+       gb_tty_exit();
+}
+
+module_exit(gb_uart_driver_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
new file mode 100644 (file)
index 0000000..ccadda0
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * USB host driver for the Greybus "generic" USB module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+/* Greybus USB request types */
+#define GB_USB_TYPE_HCD_START          0x02
+#define GB_USB_TYPE_HCD_STOP           0x03
+#define GB_USB_TYPE_HUB_CONTROL                0x04
+
+struct gb_usb_hub_control_request {
+       __le16 typeReq;
+       __le16 wValue;
+       __le16 wIndex;
+       __le16 wLength;
+};
+
+struct gb_usb_hub_control_response {
+       u8 buf[0];
+};
+
+struct gb_usb_device {
+       struct gb_connection *connection;
+       struct gbphy_device *gbphy_dev;
+};
+
+static inline struct gb_usb_device *to_gb_usb_device(struct usb_hcd *hcd)
+{
+       return (struct gb_usb_device *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *gb_usb_device_to_hcd(struct gb_usb_device *dev)
+{
+       return container_of((void *)dev, struct usb_hcd, hcd_priv);
+}
+
+static void hcd_stop(struct usb_hcd *hcd)
+{
+       struct gb_usb_device *dev = to_gb_usb_device(hcd);
+       int ret;
+
+       ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_STOP,
+                               NULL, 0, NULL, 0);
+       if (ret)
+               dev_err(&dev->gbphy_dev->dev, "HCD stop failed '%d'\n", ret);
+}
+
+static int hcd_start(struct usb_hcd *hcd)
+{
+       struct usb_bus *bus = hcd_to_bus(hcd);
+       struct gb_usb_device *dev = to_gb_usb_device(hcd);
+       int ret;
+
+       ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_START,
+                               NULL, 0, NULL, 0);
+       if (ret) {
+               dev_err(&dev->gbphy_dev->dev, "HCD start failed '%d'\n", ret);
+               return ret;
+       }
+
+       hcd->state = HC_STATE_RUNNING;
+       if (bus->root_hub)
+               usb_hcd_resume_root_hub(hcd);
+       return 0;
+}
+
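+/*
+ * URB submission is not implemented; see the FIXME in gb_usb_probe() where
+ * the protocol is disabled.
+ */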
+static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+       return -ENXIO;
+}
+
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+       return -ENXIO;
+}
+
+static int get_frame_number(struct usb_hcd *hcd)
+{
+       return 0;
+}
+
+static int hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+       return 0;
+}
+
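+/*
+ * Forward root-hub control requests to the module as a hub-control
+ * operation; when wLength is non-zero the response payload is copied back
+ * into the caller's buffer.
+ */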
+static int hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+                      char *buf, u16 wLength)
+{
+       struct gb_usb_device *dev = to_gb_usb_device(hcd);
+       struct gb_operation *operation;
+       struct gb_usb_hub_control_request *request;
+       struct gb_usb_hub_control_response *response;
+       size_t response_size;
+       int ret;
+
+       /* FIXME: handle unspecified lengths */
+       response_size = sizeof(*response) + wLength;
+
+       operation = gb_operation_create(dev->connection,
+                                       GB_USB_TYPE_HUB_CONTROL,
+                                       sizeof(*request),
+                                       response_size,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       request = operation->request->payload;
+       request->typeReq = cpu_to_le16(typeReq);
+       request->wValue = cpu_to_le16(wValue);
+       request->wIndex = cpu_to_le16(wIndex);
+       request->wLength = cpu_to_le16(wLength);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret)
+               goto out;
+
+       if (wLength) {
+               /* Greybus core has verified response size */
+               response = operation->response->payload;
+               memcpy(buf, response->buf, wLength);
+       }
+out:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static struct hc_driver usb_gb_hc_driver = {
+       .description = "greybus-hcd",
+       .product_desc = "Greybus USB Host Controller",
+       .hcd_priv_size = sizeof(struct gb_usb_device),
+
+       .flags = HCD_USB2,
+
+       .start = hcd_start,
+       .stop = hcd_stop,
+
+       .urb_enqueue = urb_enqueue,
+       .urb_dequeue = urb_dequeue,
+
+       .get_frame_number = get_frame_number,
+       .hub_status_data = hub_status_data,
+       .hub_control = hub_control,
+};
+
+static int gb_usb_probe(struct gbphy_device *gbphy_dev,
+                       const struct gbphy_device_id *id)
+{
+       struct gb_connection *connection;
+       struct device *dev = &gbphy_dev->dev;
+       struct gb_usb_device *gb_usb_dev;
+       struct usb_hcd *hcd;
+       int retval;
+
+       hcd = usb_create_hcd(&usb_gb_hc_driver, dev, dev_name(dev));
+       if (!hcd)
+               return -ENOMEM;
+
+       connection = gb_connection_create(gbphy_dev->bundle,
+                                         le16_to_cpu(gbphy_dev->cport_desc->id),
+                                         NULL);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto exit_usb_put;
+       }
+
+       gb_usb_dev = to_gb_usb_device(hcd);
+       gb_usb_dev->connection = connection;
+       gb_connection_set_data(connection, gb_usb_dev);
+       gb_usb_dev->gbphy_dev = gbphy_dev;
+       gb_gbphy_set_data(gbphy_dev, gb_usb_dev);
+
+       hcd->has_tt = 1;
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto exit_connection_destroy;
+
+       /*
+        * FIXME: The USB bridged-PHY protocol driver depends on changes to
+        *        USB core which are not yet upstream.
+        *
+        *        Disable for now.
+        */
+       if (1) {
+               dev_warn(dev, "USB protocol disabled\n");
+               retval = -EPROTONOSUPPORT;
+               goto exit_connection_disable;
+       }
+
+       retval = usb_add_hcd(hcd, 0, 0);
+       if (retval)
+               goto exit_connection_disable;
+
+       return 0;
+
+exit_connection_disable:
+       gb_connection_disable(connection);
+exit_connection_destroy:
+       gb_connection_destroy(connection);
+exit_usb_put:
+       usb_put_hcd(hcd);
+
+       return retval;
+}
+
+static void gb_usb_remove(struct gbphy_device *gbphy_dev)
+{
+       struct gb_usb_device *gb_usb_dev = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = gb_usb_dev->connection;
+       struct usb_hcd *hcd = gb_usb_device_to_hcd(gb_usb_dev);
+
+       usb_remove_hcd(hcd);
+       gb_connection_disable(connection);
+       gb_connection_destroy(connection);
+       usb_put_hcd(hcd);
+}
+
+static const struct gbphy_device_id gb_usb_id_table[] = {
+       { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_USB) },
+       { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_usb_id_table);
+
+static struct gbphy_driver usb_driver = {
+       .name           = "usb",
+       .probe          = gb_usb_probe,
+       .remove         = gb_usb_remove,
+       .id_table       = gb_usb_id_table,
+};
+
+module_gbphy_driver(usb_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
new file mode 100644 (file)
index 0000000..7296a4d
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Greybus Vibrator protocol driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+
+#include "greybus.h"
+
+struct gb_vibrator_device {
+       struct gb_connection    *connection;
+       struct device           *dev;
+       int                     minor;          /* vibrator minor number */
+       struct delayed_work     delayed_work;
+};
+
+/* Greybus Vibrator operation types */
+#define        GB_VIBRATOR_TYPE_ON                     0x02
+#define        GB_VIBRATOR_TYPE_OFF                    0x03
+
+static int turn_off(struct gb_vibrator_device *vib)
+{
+       struct gb_bundle *bundle = vib->connection->bundle;
+       int ret;
+
+       ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_OFF,
+                       NULL, 0, NULL, 0);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return ret;
+}
+
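+/*
+ * Each turn_on() takes a runtime PM reference that is dropped again by
+ * turn_off(), either directly or from the delayed work that switches the
+ * vibrator off after timeout_ms.
+ */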
+static int turn_on(struct gb_vibrator_device *vib, u16 timeout_ms)
+{
+       struct gb_bundle *bundle = vib->connection->bundle;
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               return ret;
+
+       /* Vibrator was switched ON earlier */
+       if (cancel_delayed_work_sync(&vib->delayed_work))
+               turn_off(vib);
+
+       ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_ON,
+                       NULL, 0, NULL, 0);
+       if (ret) {
+               gb_pm_runtime_put_autosuspend(bundle);
+               return ret;
+       }
+
+       schedule_delayed_work(&vib->delayed_work, msecs_to_jiffies(timeout_ms));
+
+       return 0;
+}
+
+static void gb_vibrator_worker(struct work_struct *work)
+{
+       struct delayed_work *delayed_work = to_delayed_work(work);
+       struct gb_vibrator_device *vib =
+               container_of(delayed_work, struct gb_vibrator_device, delayed_work);
+
+       turn_off(vib);
+}
+
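+/*
+ * "timeout" sysfs attribute: writing a non-zero value switches the
+ * vibrator on for that many milliseconds, writing zero switches it off
+ * immediately.
+ */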
+static ssize_t timeout_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct gb_vibrator_device *vib = dev_get_drvdata(dev);
+       unsigned long val;
+       int retval;
+
+       retval = kstrtoul(buf, 10, &val);
+       if (retval < 0) {
+               dev_err(dev, "could not parse timeout value %d\n", retval);
+               return retval;
+       }
+
+       if (val)
+               retval = turn_on(vib, (u16)val);
+       else
+               retval = turn_off(vib);
+       if (retval)
+               return retval;
+
+       return count;
+}
+static DEVICE_ATTR_WO(timeout);
+
+static struct attribute *vibrator_attrs[] = {
+       &dev_attr_timeout.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(vibrator);
+
+static struct class vibrator_class = {
+       .name           = "vibrator",
+       .owner          = THIS_MODULE,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
+       .dev_groups     = vibrator_groups,
+#endif
+};
+
+static DEFINE_IDA(minors);
+
+static int gb_vibrator_probe(struct gb_bundle *bundle,
+                                       const struct greybus_bundle_id *id)
+{
+       struct greybus_descriptor_cport *cport_desc;
+       struct gb_connection *connection;
+       struct gb_vibrator_device *vib;
+       struct device *dev;
+       int retval;
+
+       if (bundle->num_cports != 1)
+               return -ENODEV;
+
+       cport_desc = &bundle->cport_desc[0];
+       if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
+               return -ENODEV;
+
+       vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+       if (!vib)
+               return -ENOMEM;
+
+       connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+                                               NULL);
+       if (IS_ERR(connection)) {
+               retval = PTR_ERR(connection);
+               goto err_free_vib;
+       }
+       gb_connection_set_data(connection, vib);
+
+       vib->connection = connection;
+
+       greybus_set_drvdata(bundle, vib);
+
+       retval = gb_connection_enable(connection);
+       if (retval)
+               goto err_connection_destroy;
+
+       /*
+        * For now we create a device in sysfs for the vibrator, but odds are
+        * there is a "real" device somewhere in the kernel for this, but I
+        * can't find it at the moment...
+        */
+       vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+       if (vib->minor < 0) {
+               retval = vib->minor;
+               goto err_connection_disable;
+       }
+       dev = device_create(&vibrator_class, &bundle->dev,
+                           MKDEV(0, 0), vib, "vibrator%d", vib->minor);
+       if (IS_ERR(dev)) {
+               retval = -EINVAL;
+               goto err_ida_remove;
+       }
+       vib->dev = dev;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+       /*
+        * Newer kernels handle this in a race-free manner, by the dev_groups
+        * field in the struct class up above.  But for older kernels, we need
+        * to "open code" this :(
+        */
+       retval = sysfs_create_group(&dev->kobj, vibrator_groups[0]);
+       if (retval) {
+               device_unregister(dev);
+               goto err_ida_remove;
+       }
+#endif
+
+       INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);
+
+       gb_pm_runtime_put_autosuspend(bundle);
+
+       return 0;
+
+err_ida_remove:
+       ida_simple_remove(&minors, vib->minor);
+err_connection_disable:
+       gb_connection_disable(connection);
+err_connection_destroy:
+       gb_connection_destroy(connection);
+err_free_vib:
+       kfree(vib);
+
+       return retval;
+}
+
+static void gb_vibrator_disconnect(struct gb_bundle *bundle)
+{
+       struct gb_vibrator_device *vib = greybus_get_drvdata(bundle);
+       int ret;
+
+       ret = gb_pm_runtime_get_sync(bundle);
+       if (ret)
+               gb_pm_runtime_get_noresume(bundle);
+
+       if (cancel_delayed_work_sync(&vib->delayed_work))
+               turn_off(vib);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+       sysfs_remove_group(&vib->dev->kobj, vibrator_groups[0]);
+#endif
+       device_unregister(vib->dev);
+       ida_simple_remove(&minors, vib->minor);
+       gb_connection_disable(vib->connection);
+       gb_connection_destroy(vib->connection);
+       kfree(vib);
+}
+
+static const struct greybus_bundle_id gb_vibrator_id_table[] = {
+       { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
+       { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_vibrator_id_table);
+
+static struct greybus_driver gb_vibrator_driver = {
+       .name           = "vibrator",
+       .probe          = gb_vibrator_probe,
+       .disconnect     = gb_vibrator_disconnect,
+       .id_table       = gb_vibrator_id_table,
+};
+
+static __init int gb_vibrator_init(void)
+{
+       int retval;
+
+       retval = class_register(&vibrator_class);
+       if (retval)
+               return retval;
+
+       retval = greybus_register(&gb_vibrator_driver);
+       if (retval)
+               goto err_class_unregister;
+
+       return 0;
+
+err_class_unregister:
+       class_unregister(&vibrator_class);
+
+       return retval;
+}
+module_init(gb_vibrator_init);
+
+static __exit void gb_vibrator_exit(void)
+{
+       greybus_deregister(&gb_vibrator_driver);
+       class_unregister(&vibrator_class);
+       ida_destroy(&minors);
+}
+module_exit(gb_vibrator_exit);
+
+MODULE_LICENSE("GPL v2");