greybus: operation: fix broken activation logic
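
The hunks below rework how operation lifetimes are tracked: every operation now
holds an "active" count on its connection (gb_operation_get_active() /
gb_operation_put_active()), synchronous cancellation waits on
gb_operation_cancellation_queue until that count drops to zero, and completions
run on the dedicated gb_operation_completion_wq. Callers now pass gfp flags
explicitly and can use unidirectional operations and per-call timeouts.

As a rough illustration of the resulting caller pattern, here is a minimal
asynchronous-send sketch. GB_EXAMPLE_TYPE_PING, the example_*() helpers and the
choice to drop the initial reference in the callback are assumptions made for
illustration only; the gb_operation_*() calls and their signatures are the ones
introduced in the diff.

  #include <linux/kernel.h>
  #include "greybus.h"

  #define GB_EXAMPLE_TYPE_PING    0x7f    /* made-up request type */

  static void example_ping_callback(struct gb_operation *operation)
  {
          /*
           * Runs on the completion workqueue once the response has arrived,
           * the send has failed, or the operation has been cancelled.
           */
          int status = gb_operation_result(operation);

          if (status)
                  dev_err(&operation->connection->hd->dev,
                          "%s: example ping failed: %d\n",
                          operation->connection->name, status);

          /* Drop the initial reference taken by gb_operation_create(). */
          gb_operation_put(operation);
  }

  static int example_send_ping(struct gb_connection *connection)
  {
          struct gb_operation *operation;
          int ret;

          operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_PING,
                                          0, 0, GFP_KERNEL);
          if (!operation)
                  return -ENOMEM;

          ret = gb_operation_request_send(operation, example_ping_callback,
                                          GFP_KERNEL);
          if (ret)
                  gb_operation_put(operation);

          return ret;
  }

For simple request/response exchanges, the gb_operation_sync_timeout() helper
added below performs the create/send/wait/put sequence internally.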
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index f595b97..b7cc59d 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
 #include <linux/workqueue.h>
 
 #include "greybus.h"
-
-/* The default amount of time a request is given to complete */
-#define OPERATION_TIMEOUT_DEFAULT      1000    /* milliseconds */
+#include "greybus_trace.h"
 
 static struct kmem_cache *gb_operation_cache;
 static struct kmem_cache *gb_message_cache;
 
 /* Workqueue to handle Greybus operation completions. */
-static struct workqueue_struct *gb_operation_workqueue;
+static struct workqueue_struct *gb_operation_completion_wq;
 
-/* Protects the cookie representing whether a message is in flight */
-static DEFINE_MUTEX(gb_message_mutex);
+/* Wait queue for synchronous cancellations. */
+static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
 
 /*
- * Protects access to connection operations lists, as well as
- * updates to operation->errno.
+ * Protects updates to operation->errno.
  */
 static DEFINE_SPINLOCK(gb_operations_lock);
 
+static int gb_operation_response_send(struct gb_operation *operation,
+                                       int errno);
+
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+
+       if (connection->state != GB_CONNECTION_STATE_ENABLED &&
+                       (connection->state != GB_CONNECTION_STATE_ENABLED_TX ||
+                        gb_operation_is_incoming(operation))) {
+               spin_unlock_irqrestore(&connection->lock, flags);
+               return -ENOTCONN;
+       }
+
+       if (operation->active++ == 0)
+               list_add_tail(&operation->links, &connection->operations);
+
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return 0;
+}
+
+/* Caller holds operation reference. */
+static void gb_operation_put_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       if (--operation->active == 0) {
+               list_del(&operation->links);
+               if (atomic_read(&operation->waiters))
+                       wake_up(&gb_operation_cancellation_queue);
+       }
+       spin_unlock_irqrestore(&connection->lock, flags);
+}
+
+static bool gb_operation_is_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       ret = operation->active;
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return ret;
+}
+
 /*
  * Set an operation's result.
  *
@@ -114,42 +172,39 @@ int gb_operation_result(struct gb_operation *operation)
 }
 EXPORT_SYMBOL_GPL(gb_operation_result);
 
+/*
+ * Looks up an outgoing operation on a connection and returns a refcounted
+ * pointer if found, or NULL otherwise.
+ */
 static struct gb_operation *
-gb_operation_find(struct gb_connection *connection, u16 operation_id)
+gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
 {
        struct gb_operation *operation;
        unsigned long flags;
        bool found = false;
 
-       spin_lock_irqsave(&gb_operations_lock, flags);
+       spin_lock_irqsave(&connection->lock, flags);
        list_for_each_entry(operation, &connection->operations, links)
-               if (operation->id == operation_id) {
+               if (operation->id == operation_id &&
+                               !gb_operation_is_incoming(operation)) {
+                       gb_operation_get(operation);
                        found = true;
                        break;
                }
-       spin_unlock_irqrestore(&gb_operations_lock, flags);
+       spin_unlock_irqrestore(&connection->lock, flags);
 
        return found ? operation : NULL;
 }
 
-static int gb_message_send(struct gb_message *message)
+static int gb_message_send(struct gb_message *message, gfp_t gfp)
 {
        struct gb_connection *connection = message->operation->connection;
-       int ret = 0;
-       void *cookie;
 
-       mutex_lock(&gb_message_mutex);
-       cookie = connection->hd->driver->message_send(connection->hd,
+       trace_gb_message_send(message);
+       return connection->hd->driver->message_send(connection->hd,
                                        connection->hd_cport_id,
                                        message,
-                                       GFP_KERNEL);
-       if (IS_ERR(cookie))
-               ret = PTR_ERR(cookie);
-       else
-               message->cookie = cookie;
-       mutex_unlock(&gb_message_mutex);
-
-       return ret;
+                                       gfp);
 }
 
 /*
@@ -157,48 +212,41 @@ static int gb_message_send(struct gb_message *message)
  */
 static void gb_message_cancel(struct gb_message *message)
 {
-       mutex_lock(&gb_message_mutex);
-       if (message->cookie) {
-               struct greybus_host_device *hd;
+       struct gb_host_device *hd = message->operation->connection->hd;
 
-               hd = message->operation->connection->hd;
-               hd->driver->message_cancel(message->cookie);
-       }
-       mutex_unlock(&gb_message_mutex);
+       hd->driver->message_cancel(message);
 }
 
 static void gb_operation_request_handle(struct gb_operation *operation)
 {
-       struct gb_protocol *protocol = operation->connection->protocol;
+       struct gb_connection *connection = operation->connection;
        int status;
        int ret;
 
-       if (!protocol)
-               return;
-
-       if (protocol->request_recv) {
-               status = protocol->request_recv(operation->type, operation);
+       if (connection->handler) {
+               status = connection->handler(operation);
        } else {
-               dev_err(&operation->connection->dev,
-                       "unexpected incoming request type 0x%02hhx\n",
-                       operation->type);
+               dev_err(&connection->hd->dev,
+                       "%s: unexpected incoming request of type 0x%02x\n",
+                       connection->name, operation->type);
 
                status = -EPROTONOSUPPORT;
        }
 
        ret = gb_operation_response_send(operation, status);
        if (ret) {
-               dev_err(&operation->connection->dev,
-                       "failed to send response %d: %d\n",
-                       status, ret);
-                       return;
+               dev_err(&connection->hd->dev,
+                       "%s: failed to send response %d for type 0x%02x: %d\n",
+                       connection->name, status, operation->type, ret);
+               return;
        }
 }
 
 /*
- * Complete an operation in non-atomic context.  For incoming
- * requests, the callback function is the request handler, and
- * the operation result should be -EINPROGRESS at this point.
+ * Process operation work.
+ *
+ * For incoming requests, call the protocol request handler. The operation
+ * result should be -EINPROGRESS at this point.
  *
  * For outgoing requests, the operation result value should have
  * been set before queueing this.  The operation callback function
@@ -211,19 +259,21 @@ static void gb_operation_work(struct work_struct *work)
 
        operation = container_of(work, struct gb_operation, work);
 
-       operation->callback(operation);
+       if (gb_operation_is_incoming(operation))
+               gb_operation_request_handle(operation);
+       else
+               operation->callback(operation);
 
+       gb_operation_put_active(operation);
        gb_operation_put(operation);
 }
 
-static void gb_operation_message_init(struct greybus_host_device *hd,
+static void gb_operation_message_init(struct gb_host_device *hd,
                                struct gb_message *message, u16 operation_id,
                                size_t payload_size, u8 type)
 {
        struct gb_operation_msg_hdr *header;
-       u8 *buffer;
 
-       buffer = message->buffer;
        header = message->buffer;
 
        message->header = header;
@@ -232,10 +282,10 @@ static void gb_operation_message_init(struct greybus_host_device *hd,
 
        /*
         * The type supplied for incoming message buffers will be
-        * 0x00.  Such buffers will be overwritten by arriving data
-        * so there's no need to initialize the message header.
+        * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
+        * arriving data so there's no need to initialize the message header.
         */
-       if (type != GB_OPERATION_TYPE_INVALID) {
+       if (type != GB_REQUEST_TYPE_INVALID) {
                u16 message_size = (u16)(sizeof(*header) + payload_size);
 
                /*
@@ -269,7 +319,7 @@ static void gb_operation_message_init(struct greybus_host_device *hd,
  *     message payload /  the message size
  */
 static struct gb_message *
-gb_operation_message_alloc(struct greybus_host_device *hd, u8 type,
+gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
                                size_t payload_size, gfp_t gfp_flags)
 {
        struct gb_message *message;
@@ -277,7 +327,7 @@ gb_operation_message_alloc(struct greybus_host_device *hd, u8 type,
        size_t message_size = payload_size + sizeof(*header);
 
        if (message_size > hd->buffer_size_max) {
-               pr_warn("requested message size too big (%zu > %zu)\n",
+               dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
                                message_size, hd->buffer_size_max);
                return NULL;
        }
@@ -377,16 +427,15 @@ static u8 gb_operation_errno_map(int errno)
 }
 
 bool gb_operation_response_alloc(struct gb_operation *operation,
-                                       size_t response_size)
+                                       size_t response_size, gfp_t gfp)
 {
-       struct greybus_host_device *hd = operation->connection->hd;
+       struct gb_host_device *hd = operation->connection->hd;
        struct gb_operation_msg_hdr *request_header;
        struct gb_message *response;
        u8 type;
 
        type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
-       response = gb_operation_message_alloc(hd, type, response_size,
-                                               GFP_KERNEL);
+       response = gb_operation_message_alloc(hd, type, response_size, gfp);
        if (!response)
                return false;
        response->operation = operation;
@@ -429,22 +478,12 @@ EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
  */
 static struct gb_operation *
 gb_operation_create_common(struct gb_connection *connection, u8 type,
-                               size_t request_size, size_t response_size)
+                               size_t request_size, size_t response_size,
+                               unsigned long op_flags, gfp_t gfp_flags)
 {
-       struct greybus_host_device *hd = connection->hd;
+       struct gb_host_device *hd = connection->hd;
        struct gb_operation *operation;
-       unsigned long flags;
-       gfp_t gfp_flags;
 
-       /*
-        * An incoming request will pass an invalid operation type,
-        * because the header will get overwritten anyway.  These
-        * occur in interrupt context, so we must use GFP_ATOMIC.
-        */
-       if (type == GB_OPERATION_TYPE_INVALID)
-               gfp_flags = GFP_ATOMIC;
-       else
-               gfp_flags = GFP_KERNEL;
        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
@@ -457,20 +496,21 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
        operation->request->operation = operation;
 
        /* Allocate the response buffer for outgoing operations */
-       if (type != GB_OPERATION_TYPE_INVALID) {
-               if (!gb_operation_response_alloc(operation, response_size))
+       if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
+               if (!gb_operation_response_alloc(operation, response_size,
+                                                gfp_flags)) {
                        goto err_request;
-               operation->type = type;
+               }
        }
+
+       operation->flags = op_flags;
+       operation->type = type;
        operation->errno = -EBADR;  /* Initial value--means "never set" */
 
        INIT_WORK(&operation->work, gb_operation_work);
        init_completion(&operation->completion);
        kref_init(&operation->kref);
-
-       spin_lock_irqsave(&gb_operations_lock, flags);
-       list_add_tail(&operation->links, &connection->operations);
-       spin_unlock_irqrestore(&gb_operations_lock, flags);
+       atomic_set(&operation->waiters, 0);
 
        return operation;
 
@@ -490,23 +530,29 @@ err_cache:
  * invalid operation type for all protocols, and this is enforced
  * here.
  */
-struct gb_operation *gb_operation_create(struct gb_connection *connection,
-                                       u8 type, size_t request_size,
-                                       size_t response_size)
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+                               u8 type, size_t request_size,
+                               size_t response_size, unsigned long flags,
+                               gfp_t gfp)
 {
-       if (WARN_ON_ONCE(type == GB_OPERATION_TYPE_INVALID))
+       if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
                return NULL;
        if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
                type &= ~GB_MESSAGE_TYPE_RESPONSE;
 
+       if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
+               flags &= GB_OPERATION_FLAG_USER_MASK;
+
        return gb_operation_create_common(connection, type,
-                                       request_size, response_size);
+                                               request_size, response_size,
+                                               flags, gfp);
 }
-EXPORT_SYMBOL_GPL(gb_operation_create);
+EXPORT_SYMBOL_GPL(gb_operation_create_flags);
 
 size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
 {
-       struct greybus_host_device *hd = connection->hd;
+       struct gb_host_device *hd = connection->hd;
 
        return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
 }
@@ -518,18 +564,23 @@ gb_operation_create_incoming(struct gb_connection *connection, u16 id,
 {
        struct gb_operation *operation;
        size_t request_size;
+       unsigned long flags = GB_OPERATION_FLAG_INCOMING;
 
        /* Caller has made sure we at least have a message header. */
        request_size = size - sizeof(struct gb_operation_msg_hdr);
 
-       operation = gb_operation_create_common(connection,
-                                       GB_OPERATION_TYPE_INVALID,
-                                       request_size, 0);
-       if (operation) {
-               operation->id = id;
-               operation->type = type;
-               memcpy(operation->request->header, data, size);
-       }
+       if (!id)
+               flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
+
+       operation = gb_operation_create_common(connection, type,
+                                               request_size,
+                                               GB_REQUEST_TYPE_INVALID,
+                                               flags, GFP_ATOMIC);
+       if (!operation)
+               return NULL;
+
+       operation->id = id;
+       memcpy(operation->request->header, data, size);
 
        return operation;
 }
@@ -549,15 +600,9 @@ EXPORT_SYMBOL_GPL(gb_operation_get);
 static void _gb_operation_destroy(struct kref *kref)
 {
        struct gb_operation *operation;
-       unsigned long flags;
 
        operation = container_of(kref, struct gb_operation, kref);
 
-       /* XXX Make sure it's not in flight */
-       spin_lock_irqsave(&gb_operations_lock, flags);
-       list_del(&operation->links);
-       spin_unlock_irqrestore(&gb_operations_lock, flags);
-
        if (operation->response)
                gb_operation_message_free(operation->response);
        gb_operation_message_free(operation->request);
@@ -571,8 +616,10 @@ static void _gb_operation_destroy(struct kref *kref)
  */
 void gb_operation_put(struct gb_operation *operation)
 {
-       if (!WARN_ON(!operation))
-               kref_put(&operation->kref, _gb_operation_destroy);
+       if (WARN_ON(!operation))
+               return;
+
+       kref_put(&operation->kref, _gb_operation_destroy);
 }
 EXPORT_SYMBOL_GPL(gb_operation_put);
 
@@ -582,34 +629,37 @@ static void gb_operation_sync_callback(struct gb_operation *operation)
        complete(&operation->completion);
 }
 
-/*
- * Send an operation request message. The caller has filled in any payload so
- * the request message is ready to go. The callback function supplied will be
- * called when the response message has arrived indicating the operation is
- * complete. In that case, the callback function is responsible for fetching
- * the result of the operation using gb_operation_result() if desired, and
- * dropping the initial reference to the operation.
+/**
+ * gb_operation_request_send() - send an operation request message
+ * @operation: the operation to initiate
+ * @callback:  the operation completion callback
+ * @gfp:       the memory flags to use for any allocations
+ *
+ * The caller has filled in any payload so the request message is ready to go.
+ * The callback function supplied will be called when the response message has
+ * arrived, a unidirectional request has been sent, or the operation is
+ * cancelled, indicating that the operation is complete. The callback function
+ * can fetch the result of the operation using gb_operation_result() if
+ * desired.
+ *
+ * Return: 0 if the request was successfully queued in the host-driver queues,
+ * or a negative errno.
  */
 int gb_operation_request_send(struct gb_operation *operation,
-                               gb_operation_callback callback)
+                               gb_operation_callback callback,
+                               gfp_t gfp)
 {
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        unsigned int cycle;
        int ret;
 
+       if (gb_connection_is_offloaded(connection))
+               return -EBUSY;
+
        if (!callback)
                return -EINVAL;
 
-       if (connection->state != GB_CONNECTION_STATE_ENABLED)
-               return -ENOTCONN;
-
-       /*
-        * First, get an extra reference on the operation.
-        * It'll be dropped when the operation completes.
-        */
-       gb_operation_get(operation);
-
        /*
         * Record the callback function, which is executed in
         * non-atomic (workqueue) context when the final result
@@ -619,19 +669,39 @@ int gb_operation_request_send(struct gb_operation *operation,
 
        /*
         * Assign the operation's id, and store it in the request header.
-        * Zero is a reserved operation id.
+        * Zero is a reserved operation id for unidirectional operations.
         */
-       cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
-       operation->id = (u16)(cycle % U16_MAX + 1);
+       if (gb_operation_is_unidirectional(operation)) {
+               operation->id = 0;
+       } else {
+               cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
+               operation->id = (u16)(cycle % U16_MAX + 1);
+       }
+
        header = operation->request->header;
        header->operation_id = cpu_to_le16(operation->id);
 
-       /* All set, send the request */
        gb_operation_result_set(operation, -EINPROGRESS);
 
-       ret = gb_message_send(operation->request);
+       /*
+        * Get an extra reference on the operation. It'll be dropped when the
+        * operation completes.
+        */
+       gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
        if (ret)
-               gb_operation_put(operation);
+               goto err_put;
+
+       ret = gb_message_send(operation->request, gfp);
+       if (ret)
+               goto err_put_active;
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
 
        return ret;
 }
@@ -643,17 +713,24 @@ EXPORT_SYMBOL_GPL(gb_operation_request_send);
  * error is detected.  The return value is the result of the
  * operation.
  */
-int gb_operation_request_send_sync(struct gb_operation *operation)
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+                                               unsigned int timeout)
 {
        int ret;
-       unsigned long timeout;
+       unsigned long timeout_jiffies;
 
-       ret = gb_operation_request_send(operation, gb_operation_sync_callback);
+       ret = gb_operation_request_send(operation, gb_operation_sync_callback,
+                                       GFP_KERNEL);
        if (ret)
                return ret;
 
-       timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
-       ret = wait_for_completion_interruptible_timeout(&operation->completion, timeout);
+       if (timeout)
+               timeout_jiffies = msecs_to_jiffies(timeout);
+       else
+               timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+       ret = wait_for_completion_interruptible_timeout(&operation->completion,
+                                                       timeout_jiffies);
        if (ret < 0) {
                /* Cancel the operation if interrupted */
                gb_operation_cancel(operation, -ECANCELED);
@@ -664,7 +741,7 @@ int gb_operation_request_send_sync(struct gb_operation *operation)
 
        return gb_operation_result(operation);
 }
-EXPORT_SYMBOL_GPL(gb_operation_request_send_sync);
+EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
 
 /*
  * Send a response for an incoming operation request.  A non-zero
@@ -675,71 +752,85 @@ EXPORT_SYMBOL_GPL(gb_operation_request_send_sync);
  * it can simply supply the result errno; this function will
  * allocate the response message if necessary.
  */
-int gb_operation_response_send(struct gb_operation *operation, int errno)
+static int gb_operation_response_send(struct gb_operation *operation,
+                                       int errno)
 {
        struct gb_connection *connection = operation->connection;
        int ret;
 
+       if (!operation->response &&
+                       !gb_operation_is_unidirectional(operation)) {
+               if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
+                       return -ENOMEM;
+       }
+
        /* Record the result */
        if (!gb_operation_result_set(operation, errno)) {
-               dev_err(&connection->dev, "request result already set\n");
+               dev_err(&connection->hd->dev, "request result already set\n");
                return -EIO;    /* Shouldn't happen */
        }
 
-       if (!operation->response) {
-               if (!gb_operation_response_alloc(operation, 0)) {
-                       dev_err(&connection->dev,
-                               "error allocating response\n");
-                       /* XXX Respond with pre-allocated -ENOMEM? */
-                       return -ENOMEM;
-               }
-       }
+       /* Sender of request does not care about response. */
+       if (gb_operation_is_unidirectional(operation))
+               return 0;
 
        /* Reference will be dropped when message has been sent. */
        gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
+       if (ret)
+               goto err_put;
 
        /* Fill in the response header and send it */
        operation->response->header->result = gb_operation_errno_map(errno);
 
-       ret = gb_message_send(operation->response);
+       ret = gb_message_send(operation->response, GFP_KERNEL);
        if (ret)
-               gb_operation_put(operation);
+               goto err_put_active;
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(gb_operation_response_send);
 
 /*
  * This function is called when a message send request has completed.
  */
-void greybus_message_sent(struct greybus_host_device *hd,
+void greybus_message_sent(struct gb_host_device *hd,
                                        struct gb_message *message, int status)
 {
-       struct gb_operation *operation;
-
-       /* Get the message and record that it is no longer in flight */
-       message->cookie = NULL;
+       struct gb_operation *operation = message->operation;
+       struct gb_connection *connection = operation->connection;
 
        /*
         * If the message was a response, we just need to drop our
         * reference to the operation.  If an error occurred, report
         * it.
         *
-        * For requests, if there's no error, there's nothing more
-        * to do until the response arrives.  If an error occurred
-        * attempting to send it, record that as the result of
-        * the operation and schedule its completion.
+        * For requests, if there's no error and the operation is not
+        * unidirectional, there's nothing more to do until the response
+        * arrives. If an error occurred attempting to send it, or if the
+        * operation is unidirectional, record the result of the operation
+        * and schedule its completion.
         */
-       operation = message->operation;
        if (message == operation->response) {
                if (status) {
-                       dev_err(&operation->connection->dev,
-                               "error sending response: %d\n", status);
+                       dev_err(&connection->hd->dev,
+                               "%s: error sending response 0x%02x: %d\n",
+                               connection->name, operation->type, status);
                }
+
+               gb_operation_put_active(operation);
                gb_operation_put(operation);
-       } else if (status) {
-               if (gb_operation_result_set(operation, status))
-                       queue_work(gb_operation_workqueue, &operation->work);
+       } else if (status || gb_operation_is_unidirectional(operation)) {
+               if (gb_operation_result_set(operation, status)) {
+                       queue_work(gb_operation_completion_wq,
+                                       &operation->work);
+               }
        }
 }
 EXPORT_SYMBOL_GPL(greybus_message_sent);
@@ -756,25 +847,30 @@ static void gb_connection_recv_request(struct gb_connection *connection,
                                       void *data, size_t size)
 {
        struct gb_operation *operation;
+       int ret;
 
        operation = gb_operation_create_incoming(connection, operation_id,
                                                type, data, size);
        if (!operation) {
-               dev_err(&connection->dev, "can't create operation\n");
-               return;         /* XXX Respond with pre-allocated ENOMEM */
+               dev_err(&connection->hd->dev,
+                       "%s: can't create incoming operation\n",
+                       connection->name);
+               return;
        }
 
+       ret = gb_operation_get_active(operation);
+       if (ret) {
+               gb_operation_put(operation);
+               return;
+       }
+       trace_gb_message_recv_request(operation->request);
+
        /*
-        * Incoming requests are handled by arranging for the
-        * request handler to be the operation's callback function.
-        *
-        * The last thing the handler does is send a response
-        * message. The initial reference to the operation will be
-        * dropped when the handler returns.
+        * The initial reference to the operation will be dropped when the
+        * request handler returns.
         */
-       operation->callback = gb_operation_request_handle;
        if (gb_operation_result_set(operation, -EINPROGRESS))
-               queue_work(gb_operation_workqueue, &operation->work);
+               queue_work(connection->wq, &operation->work);
 }
 
 /*
@@ -788,33 +884,60 @@ static void gb_connection_recv_request(struct gb_connection *connection,
 static void gb_connection_recv_response(struct gb_connection *connection,
                        u16 operation_id, u8 result, void *data, size_t size)
 {
+       struct gb_operation_msg_hdr *header;
        struct gb_operation *operation;
        struct gb_message *message;
        int errno = gb_operation_status_map(result);
        size_t message_size;
 
-       operation = gb_operation_find(connection, operation_id);
+       if (!operation_id) {
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: invalid response id 0 received\n",
+                               connection->name);
+               return;
+       }
+
+       operation = gb_operation_find_outgoing(connection, operation_id);
        if (!operation) {
-               dev_err(&connection->dev, "operation not found\n");
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: unexpected response id 0x%04x received\n",
+                               connection->name, operation_id);
                return;
        }
 
        message = operation->response;
-       message_size = sizeof(*message->header) + message->payload_size;
-       if (!errno && size != message_size) {
-               dev_err(&connection->dev, "bad message size (%zu != %zu)\n",
-                       size, message_size);
+       header = message->header;
+       message_size = sizeof(*header) + message->payload_size;
+       if (!errno && size > message_size) {
+               dev_err_ratelimited(&connection->hd->dev,
+                               "%s: malformed response 0x%02x received (%zu > %zu)\n",
+                               connection->name, header->type,
+                               size, message_size);
                errno = -EMSGSIZE;
+       } else if (!errno && size < message_size) {
+               if (gb_operation_short_response_allowed(operation)) {
+                       message->payload_size = size - sizeof(*header);
+               } else {
+                       dev_err_ratelimited(&connection->hd->dev,
+                                       "%s: short response 0x%02x received (%zu < %zu)\n",
+                                       connection->name, header->type,
+                                       size, message_size);
+                       errno = -EMSGSIZE;
+               }
        }
+       trace_gb_message_recv_response(operation->response);
 
        /* We must ignore the payload if a bad status is returned */
        if (errno)
-               size = sizeof(*message->header);
-       memcpy(message->header, data, size);
+               size = sizeof(*header);
 
        /* The rest will be handled in work queue context */
-       if (gb_operation_result_set(operation, errno))
-               queue_work(gb_operation_workqueue, &operation->work);
+       if (gb_operation_result_set(operation, errno)) {
+               memcpy(header, data, size);
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+
+       gb_operation_put(operation);
 }
 
 /*
@@ -826,17 +949,21 @@ void gb_connection_recv(struct gb_connection *connection,
                                void *data, size_t size)
 {
        struct gb_operation_msg_hdr header;
+       struct device *dev = &connection->hd->dev;
        size_t msg_size;
        u16 operation_id;
 
-       if (connection->state != GB_CONNECTION_STATE_ENABLED) {
-               dev_err(&connection->dev, "dropping %zu received bytes\n",
-                       size);
+       if ((connection->state != GB_CONNECTION_STATE_ENABLED &&
+                       connection->state != GB_CONNECTION_STATE_ENABLED_TX) ||
+                       gb_connection_is_offloaded(connection)) {
+               dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
+                               connection->name, size);
                return;
        }
 
        if (size < sizeof(header)) {
-               dev_err(&connection->dev, "message too small\n");
+               dev_err_ratelimited(dev, "%s: short message received\n",
+                               connection->name);
                return;
        }
 
@@ -844,9 +971,11 @@ void gb_connection_recv(struct gb_connection *connection,
        memcpy(&header, data, sizeof(header));
        msg_size = le16_to_cpu(header.size);
        if (size < msg_size) {
-               dev_err(&connection->dev,
-                       "incomplete message received: 0x%04x (%zu < %zu)\n",
-                       le16_to_cpu(header.operation_id), size, msg_size);
+               dev_err_ratelimited(dev,
+                               "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
+                               connection->name,
+                               le16_to_cpu(header.operation_id),
+                               header.type, size, msg_size);
                return;         /* XXX Should still complete operation */
        }
 
@@ -860,27 +989,62 @@ void gb_connection_recv(struct gb_connection *connection,
 }
 
 /*
- * Cancel an operation, and record the given error to indicate why.
+ * Cancel an outgoing operation synchronously, and record the given error to
+ * indicate why.
  */
 void gb_operation_cancel(struct gb_operation *operation, int errno)
 {
+       if (WARN_ON(gb_operation_is_incoming(operation)))
+               return;
+
        if (gb_operation_result_set(operation, errno)) {
                gb_message_cancel(operation->request);
-               if (operation->response)
-                       gb_message_cancel(operation->response);
+               queue_work(gb_operation_completion_wq, &operation->work);
        }
-       gb_operation_put(operation);
+       trace_gb_message_cancel_outgoing(operation->request);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                       !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
 }
 EXPORT_SYMBOL_GPL(gb_operation_cancel);
 
+/*
+ * Cancel an incoming operation synchronously. Called during connection
+ * teardown.
+ */
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
+{
+       if (WARN_ON(!gb_operation_is_incoming(operation)))
+               return;
+
+       if (!gb_operation_is_unidirectional(operation)) {
+               /*
+                * Make sure the request handler has submitted the response
+                * before cancelling it.
+                */
+               flush_work(&operation->work);
+               if (!gb_operation_result_set(operation, errno))
+                       gb_message_cancel(operation->response);
+       }
+       trace_gb_message_cancel_incoming(operation->response);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                       !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
+}
+
 /**
- * gb_operation_sync: implement a "simple" synchronous gb operation.
+ * gb_operation_sync_timeout() - implement a "simple" synchronous operation
  * @connection: the Greybus connection to send this to
  * @type: the type of operation to send
  * @request: pointer to a memory buffer to copy the request from
  * @request_size: size of @request
  * @response: pointer to a memory buffer to copy the response to
  * @response_size: the size of @response.
+ * @timeout: operation timeout in milliseconds
  *
  * This function implements a simple synchronous Greybus operation.  It sends
  * the provided operation request and waits (sleeps) until the corresponding
@@ -895,9 +1059,10 @@ EXPORT_SYMBOL_GPL(gb_operation_cancel);
  *
  * If there is an error, the response buffer is left alone.
  */
-int gb_operation_sync(struct gb_connection *connection, int type,
-                     void *request, int request_size,
-                     void *response, int response_size)
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+                               void *request, int request_size,
+                               void *response, int response_size,
+                               unsigned int timeout)
 {
        struct gb_operation *operation;
        int ret;
@@ -907,30 +1072,80 @@ int gb_operation_sync(struct gb_connection *connection, int type,
                return -EINVAL;
 
        operation = gb_operation_create(connection, type,
-                                       request_size, response_size);
+                                       request_size, response_size,
+                                       GFP_KERNEL);
        if (!operation)
                return -ENOMEM;
 
        if (request_size)
                memcpy(operation->request->payload, request, request_size);
 
-       ret = gb_operation_request_send_sync(operation);
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
        if (ret) {
-               dev_err(&connection->dev, "synchronous operation failed: %d\n",
-                       ret);
+               dev_err(&connection->hd->dev,
+                       "%s: synchronous operation of type 0x%02x failed: %d\n",
+                       connection->name, type, ret);
        } else {
                if (response_size) {
                        memcpy(response, operation->response->payload,
                               response_size);
                }
        }
-       gb_operation_destroy(operation);
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
+
+/**
+ * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
+ * @connection:                connection to use
+ * @type:              type of operation to send
+ * @request:           memory buffer to copy the request from
+ * @request_size:      size of @request
+ * @timeout:           send timeout in milliseconds
+ *
+ * Initiate a unidirectional operation by sending a request message and
+ * waiting for it to be acknowledged as sent by the host device.
+ *
+ * Note that successful send of a unidirectional operation does not imply that
+ * the request has actually reached the remote end of the connection.
+ */
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+                               int type, void *request, int request_size,
+                               unsigned int timeout)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       if (request_size && !request)
+               return -EINVAL;
+
+       operation = gb_operation_create_flags(connection, type,
+                                       request_size, 0,
+                                       GB_OPERATION_FLAG_UNIDIRECTIONAL,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: unidirectional operation of type 0x%02x failed: %d\n",
+                       connection->name, type, ret);
+       }
+
+       gb_operation_put(operation);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(gb_operation_sync);
+EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
 
-int gb_operation_init(void)
+int __init gb_operation_init(void)
 {
        gb_message_cache = kmem_cache_create("gb_message_cache",
                                sizeof(struct gb_message), 0, 0, NULL);
@@ -942,12 +1157,14 @@ int gb_operation_init(void)
        if (!gb_operation_cache)
                goto err_destroy_message_cache;
 
-       gb_operation_workqueue = alloc_workqueue("greybus_operation", 0, 1);
-       if (!gb_operation_workqueue)
-               goto err_operation;
+       gb_operation_completion_wq = alloc_workqueue("greybus_completion",
+                               0, 0);
+       if (!gb_operation_completion_wq)
+               goto err_destroy_operation_cache;
 
        return 0;
-err_operation:
+
+err_destroy_operation_cache:
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
 err_destroy_message_cache:
@@ -959,8 +1176,8 @@ err_destroy_message_cache:
 
 void gb_operation_exit(void)
 {
-       destroy_workqueue(gb_operation_workqueue);
-       gb_operation_workqueue = NULL;
+       destroy_workqueue(gb_operation_completion_wq);
+       gb_operation_completion_wq = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
        kmem_cache_destroy(gb_message_cache);