X-Git-Url: http://git.cascardo.info/?a=blobdiff_plain;f=drivers%2Fstaging%2Fgreybus%2Foperation.c;h=659e84b10c7f14bfd73e43d2957ee36c44ac2e35;hb=dacf3eb7f3c1c474a55f7c24eef39011784d558c;hp=abe44c18fb9e2cc61234d96cc891ef52ed790f24;hpb=3eeac7e37ce9856e53693772dfe81a79b57b5a00;p=cascardo%2Flinux.git

diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index abe44c18fb9e..659e84b10c7f 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -10,38 +10,101 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
 #include <linux/workqueue.h>
 
 #include "greybus.h"
-
-/* The default amount of time a request is given to complete */
-#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */
+#include "greybus_trace.h"
 
 static struct kmem_cache *gb_operation_cache;
 static struct kmem_cache *gb_message_cache;
 
 /* Workqueue to handle Greybus operation completions. */
-static struct workqueue_struct *gb_operation_workqueue;
+static struct workqueue_struct *gb_operation_completion_wq;
+
+/* Wait queue for synchronous cancellations. */
+static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
 
 /*
- * Protects access to connection operations lists, as well as
- * updates to operation->errno.
+ * Protects updates to operation->errno.
  */
 static DEFINE_SPINLOCK(gb_operations_lock);
 
 static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);
 
-/* Caller holds operation reference. */
-static inline void gb_operation_get_active(struct gb_operation *operation)
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
 {
-	atomic_inc(&operation->active);
+	struct gb_connection *connection = operation->connection;
+	unsigned long flags;
+
+	spin_lock_irqsave(&connection->lock, flags);
+	switch (connection->state) {
+	case GB_CONNECTION_STATE_ENABLED:
+		break;
+	case GB_CONNECTION_STATE_ENABLED_TX:
+		if (gb_operation_is_incoming(operation))
+			goto err_unlock;
+		break;
+	case GB_CONNECTION_STATE_DISCONNECTING:
+		if (!gb_operation_is_core(operation))
+			goto err_unlock;
+		break;
+	default:
+		goto err_unlock;
+	}
+
+	if (operation->active++ == 0)
+		list_add_tail(&operation->links, &connection->operations);
+
+	trace_gb_operation_get_active(operation);
+
+	spin_unlock_irqrestore(&connection->lock, flags);
+
+	return 0;
+
+err_unlock:
+	spin_unlock_irqrestore(&connection->lock, flags);
+
+	return -ENOTCONN;
 }
 
 /* Caller holds operation reference.
*/ -static inline void gb_operation_put_active(struct gb_operation *operation) +static void gb_operation_put_active(struct gb_operation *operation) +{ + struct gb_connection *connection = operation->connection; + unsigned long flags; + + spin_lock_irqsave(&connection->lock, flags); + + trace_gb_operation_put_active(operation); + + if (--operation->active == 0) { + list_del(&operation->links); + if (atomic_read(&operation->waiters)) + wake_up(&gb_operation_cancellation_queue); + } + spin_unlock_irqrestore(&connection->lock, flags); +} + +static bool gb_operation_is_active(struct gb_operation *operation) { - atomic_dec(&operation->active); + struct gb_connection *connection = operation->connection; + unsigned long flags; + bool ret; + + spin_lock_irqsave(&connection->lock, flags); + ret = operation->active; + spin_unlock_irqrestore(&connection->lock, flags); + + return ret; } /* @@ -137,7 +200,7 @@ gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id) unsigned long flags; bool found = false; - spin_lock_irqsave(&gb_operations_lock, flags); + spin_lock_irqsave(&connection->lock, flags); list_for_each_entry(operation, &connection->operations, links) if (operation->id == operation_id && !gb_operation_is_incoming(operation)) { @@ -145,7 +208,7 @@ gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id) found = true; break; } - spin_unlock_irqrestore(&gb_operations_lock, flags); + spin_unlock_irqrestore(&connection->lock, flags); return found ? operation : NULL; } @@ -154,10 +217,11 @@ static int gb_message_send(struct gb_message *message, gfp_t gfp) { struct gb_connection *connection = message->operation->connection; + trace_gb_message_send(message); return connection->hd->driver->message_send(connection->hd, connection->hd_cport_id, message, - GFP_KERNEL); + gfp); } /* @@ -165,43 +229,41 @@ static int gb_message_send(struct gb_message *message, gfp_t gfp) */ static void gb_message_cancel(struct gb_message *message) { - struct greybus_host_device *hd = message->operation->connection->hd; + struct gb_host_device *hd = message->operation->connection->hd; hd->driver->message_cancel(message); } static void gb_operation_request_handle(struct gb_operation *operation) { - struct gb_protocol *protocol = operation->connection->protocol; + struct gb_connection *connection = operation->connection; int status; int ret; - if (!protocol) - return; - - if (protocol->request_recv) { - status = protocol->request_recv(operation->type, operation); + if (connection->handler) { + status = connection->handler(operation); } else { - dev_err(&operation->connection->dev, - "unexpected incoming request type 0x%02hhx\n", - operation->type); + dev_err(&connection->hd->dev, + "%s: unexpected incoming request of type 0x%02x\n", + connection->name, operation->type); status = -EPROTONOSUPPORT; } ret = gb_operation_response_send(operation, status); if (ret) { - dev_err(&operation->connection->dev, - "failed to send response %d: %d\n", - status, ret); - return; + dev_err(&connection->hd->dev, + "%s: failed to send response %d for type 0x%02x: %d\n", + connection->name, status, operation->type, ret); + return; } } /* - * Complete an operation in non-atomic context. For incoming - * requests, the callback function is the request handler, and - * the operation result should be -EINPROGRESS at this point. + * Process operation work. + * + * For incoming requests, call the protocol request handler. The operation + * result should be -EINPROGRESS at this point. 
* * For outgoing requests, the operation result value should have * been set before queueing this. The operation callback function @@ -214,13 +276,16 @@ static void gb_operation_work(struct work_struct *work) operation = container_of(work, struct gb_operation, work); - operation->callback(operation); + if (gb_operation_is_incoming(operation)) + gb_operation_request_handle(operation); + else + operation->callback(operation); gb_operation_put_active(operation); gb_operation_put(operation); } -static void gb_operation_message_init(struct greybus_host_device *hd, +static void gb_operation_message_init(struct gb_host_device *hd, struct gb_message *message, u16 operation_id, size_t payload_size, u8 type) { @@ -234,10 +299,10 @@ static void gb_operation_message_init(struct greybus_host_device *hd, /* * The type supplied for incoming message buffers will be - * 0x00. Such buffers will be overwritten by arriving data - * so there's no need to initialize the message header. + * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by + * arriving data so there's no need to initialize the message header. */ - if (type != GB_OPERATION_TYPE_INVALID) { + if (type != GB_REQUEST_TYPE_INVALID) { u16 message_size = (u16)(sizeof(*header) + payload_size); /* @@ -271,7 +336,7 @@ static void gb_operation_message_init(struct greybus_host_device *hd, * message payload / the message size */ static struct gb_message * -gb_operation_message_alloc(struct greybus_host_device *hd, u8 type, +gb_operation_message_alloc(struct gb_host_device *hd, u8 type, size_t payload_size, gfp_t gfp_flags) { struct gb_message *message; @@ -279,7 +344,7 @@ gb_operation_message_alloc(struct greybus_host_device *hd, u8 type, size_t message_size = payload_size + sizeof(*header); if (message_size > hd->buffer_size_max) { - pr_warn("requested message size too big (%zu > %zu)\n", + dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n", message_size, hd->buffer_size_max); return NULL; } @@ -379,16 +444,15 @@ static u8 gb_operation_errno_map(int errno) } bool gb_operation_response_alloc(struct gb_operation *operation, - size_t response_size) + size_t response_size, gfp_t gfp) { - struct greybus_host_device *hd = operation->connection->hd; + struct gb_host_device *hd = operation->connection->hd; struct gb_operation_msg_hdr *request_header; struct gb_message *response; u8 type; type = operation->type | GB_MESSAGE_TYPE_RESPONSE; - response = gb_operation_message_alloc(hd, type, response_size, - GFP_KERNEL); + response = gb_operation_message_alloc(hd, type, response_size, gfp); if (!response) return false; response->operation = operation; @@ -434,9 +498,8 @@ gb_operation_create_common(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long op_flags, gfp_t gfp_flags) { - struct greybus_host_device *hd = connection->hd; + struct gb_host_device *hd = connection->hd; struct gb_operation *operation; - unsigned long flags; operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags); if (!operation) @@ -451,8 +514,10 @@ gb_operation_create_common(struct gb_connection *connection, u8 type, /* Allocate the response buffer for outgoing operations */ if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) { - if (!gb_operation_response_alloc(operation, response_size)) + if (!gb_operation_response_alloc(operation, response_size, + gfp_flags)) { goto err_request; + } } operation->flags = op_flags; @@ -462,11 +527,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type, 
INIT_WORK(&operation->work, gb_operation_work); init_completion(&operation->completion); kref_init(&operation->kref); - atomic_set(&operation->active, 0); - - spin_lock_irqsave(&gb_operations_lock, flags); - list_add_tail(&operation->links, &connection->operations); - spin_unlock_irqrestore(&gb_operations_lock, flags); + atomic_set(&operation->waiters, 0); return operation; @@ -486,24 +547,55 @@ err_cache: * invalid operation type for all protocols, and this is enforced * here. */ -struct gb_operation *gb_operation_create(struct gb_connection *connection, - u8 type, size_t request_size, - size_t response_size, - gfp_t gfp) +struct gb_operation * +gb_operation_create_flags(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp) { - if (WARN_ON_ONCE(type == GB_OPERATION_TYPE_INVALID)) + struct gb_operation *operation; + + if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID)) return NULL; if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE)) type &= ~GB_MESSAGE_TYPE_RESPONSE; - return gb_operation_create_common(connection, type, - request_size, response_size, 0, gfp); + if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK)) + flags &= GB_OPERATION_FLAG_USER_MASK; + + operation = gb_operation_create_common(connection, type, + request_size, response_size, + flags, gfp); + if (operation) + trace_gb_operation_create(operation); + + return operation; +} +EXPORT_SYMBOL_GPL(gb_operation_create_flags); + +struct gb_operation * +gb_operation_create_core(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp) +{ + struct gb_operation *operation; + + flags |= GB_OPERATION_FLAG_CORE; + + operation = gb_operation_create_common(connection, type, + request_size, response_size, + flags, gfp); + if (operation) + trace_gb_operation_create_core(operation); + + return operation; } -EXPORT_SYMBOL_GPL(gb_operation_create); +/* Do not export this function. */ size_t gb_operation_get_payload_size_max(struct gb_connection *connection) { - struct greybus_host_device *hd = connection->hd; + struct gb_host_device *hd = connection->hd; return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr); } @@ -524,12 +616,15 @@ gb_operation_create_incoming(struct gb_connection *connection, u16 id, flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL; operation = gb_operation_create_common(connection, type, - request_size, 0, flags, GFP_ATOMIC); + request_size, + GB_REQUEST_TYPE_INVALID, + flags, GFP_ATOMIC); if (!operation) return NULL; operation->id = id; memcpy(operation->request->header, data, size); + trace_gb_operation_create_incoming(operation); return operation; } @@ -552,9 +647,7 @@ static void _gb_operation_destroy(struct kref *kref) operation = container_of(kref, struct gb_operation, kref); - /* XXX Make sure it's not in flight */ - list_del(&operation->links); - spin_unlock(&gb_operations_lock); + trace_gb_operation_destroy(operation); if (operation->response) gb_operation_message_free(operation->response); @@ -572,8 +665,7 @@ void gb_operation_put(struct gb_operation *operation) if (WARN_ON(!operation)) return; - kref_put_spinlock_irqsave(&operation->kref, _gb_operation_destroy, - &gb_operations_lock); + kref_put(&operation->kref, _gb_operation_destroy); } EXPORT_SYMBOL_GPL(gb_operation_put); @@ -583,13 +675,21 @@ static void gb_operation_sync_callback(struct gb_operation *operation) complete(&operation->completion); } -/* - * Send an operation request message. 
The caller has filled in any payload so - * the request message is ready to go. The callback function supplied will be - * called when the response message has arrived indicating the operation is - * complete. In that case, the callback function is responsible for fetching - * the result of the operation using gb_operation_result() if desired, and - * dropping the initial reference to the operation. +/** + * gb_operation_request_send() - send an operation request message + * @operation: the operation to initiate + * @callback: the operation completion callback + * @gfp: the memory flags to use for any allocations + * + * The caller has filled in any payload so the request message is ready to go. + * The callback function supplied will be called when the response message has + * arrived, a unidirectional request has been sent, or the operation is + * cancelled, indicating that the operation is complete. The callback function + * can fetch the result of the operation using gb_operation_result() if + * desired. + * + * Return: 0 if the request was successfully queued in the host-driver queues, + * or a negative errno. */ int gb_operation_request_send(struct gb_operation *operation, gb_operation_callback callback, @@ -600,19 +700,12 @@ int gb_operation_request_send(struct gb_operation *operation, unsigned int cycle; int ret; + if (gb_connection_is_offloaded(connection)) + return -EBUSY; + if (!callback) return -EINVAL; - if (connection->state != GB_CONNECTION_STATE_ENABLED) - return -ENOTCONN; - - /* - * First, get an extra reference on the operation. - * It'll be dropped when the operation completes. - */ - gb_operation_get(operation); - gb_operation_get_active(operation); - /* * Record the callback function, which is executed in * non-atomic (workqueue) context when the final result @@ -622,21 +715,39 @@ int gb_operation_request_send(struct gb_operation *operation, /* * Assign the operation's id, and store it in the request header. - * Zero is a reserved operation id. + * Zero is a reserved operation id for unidirectional operations. */ - cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); - operation->id = (u16)(cycle % U16_MAX + 1); + if (gb_operation_is_unidirectional(operation)) { + operation->id = 0; + } else { + cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); + operation->id = (u16)(cycle % U16_MAX + 1); + } + header = operation->request->header; header->operation_id = cpu_to_le16(operation->id); - /* All set, send the request */ gb_operation_result_set(operation, -EINPROGRESS); + /* + * Get an extra reference on the operation. It'll be dropped when the + * operation completes. + */ + gb_operation_get(operation); + ret = gb_operation_get_active(operation); + if (ret) + goto err_put; + ret = gb_message_send(operation->request, gfp); - if (ret) { - gb_operation_put_active(operation); - gb_operation_put(operation); - } + if (ret) + goto err_put_active; + + return 0; + +err_put_active: + gb_operation_put_active(operation); +err_put: + gb_operation_put(operation); return ret; } @@ -648,18 +759,24 @@ EXPORT_SYMBOL_GPL(gb_operation_request_send); * error is detected. The return value is the result of the * operation. 
 */
-int gb_operation_request_send_sync(struct gb_operation *operation)
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+					unsigned int timeout)
 {
 	int ret;
-	unsigned long timeout;
+	unsigned long timeout_jiffies;
 
 	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
 					GFP_KERNEL);
 	if (ret)
 		return ret;
 
-	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
-	ret = wait_for_completion_interruptible_timeout(&operation->completion, timeout);
+	if (timeout)
+		timeout_jiffies = msecs_to_jiffies(timeout);
+	else
+		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+	ret = wait_for_completion_interruptible_timeout(&operation->completion,
+							timeout_jiffies);
 	if (ret < 0) {
 		/* Cancel the operation if interrupted */
 		gb_operation_cancel(operation, -ECANCELED);
@@ -670,7 +787,7 @@ int gb_operation_request_send_sync(struct gb_operation *operation)
 
 	return gb_operation_result(operation);
 }
-EXPORT_SYMBOL_GPL(gb_operation_request_send_sync);
+EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
 
 /*
  * Send a response for an incoming operation request. A non-zero
@@ -689,13 +806,13 @@ static int gb_operation_response_send(struct gb_operation *operation,
 
 	if (!operation->response &&
 			!gb_operation_is_unidirectional(operation)) {
-		if (!gb_operation_response_alloc(operation, 0))
+		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
 			return -ENOMEM;
 	}
 
 	/* Record the result */
 	if (!gb_operation_result_set(operation, errno)) {
-		dev_err(&connection->dev, "request result already set\n");
+		dev_err(&connection->hd->dev, "request result already set\n");
 		return -EIO;	/* Shouldn't happen */
 	}
 
@@ -705,16 +822,23 @@ static int gb_operation_response_send(struct gb_operation *operation,
 
 	/* Reference will be dropped when message has been sent. */
 	gb_operation_get(operation);
-	gb_operation_get_active(operation);
+	ret = gb_operation_get_active(operation);
+	if (ret)
+		goto err_put;
 
 	/* Fill in the response header and send it */
 	operation->response->header->result = gb_operation_errno_map(errno);
 
 	ret = gb_message_send(operation->response, GFP_KERNEL);
-	if (ret) {
-		gb_operation_put_active(operation);
-		gb_operation_put(operation);
-	}
+	if (ret)
+		goto err_put_active;
+
+	return 0;
+
+err_put_active:
+	gb_operation_put_active(operation);
+err_put:
+	gb_operation_put(operation);
 
 	return ret;
 }
@@ -722,32 +846,37 @@ static int gb_operation_response_send(struct gb_operation *operation,
 /*
  * This function is called when a message send request has completed.
  */
-void greybus_message_sent(struct greybus_host_device *hd,
+void greybus_message_sent(struct gb_host_device *hd,
 			  struct gb_message *message, int status)
 {
-	struct gb_operation *operation;
+	struct gb_operation *operation = message->operation;
+	struct gb_connection *connection = operation->connection;
 
 	/*
 	 * If the message was a response, we just need to drop our
 	 * reference to the operation. If an error occurred, report
 	 * it.
 	 *
-	 * For requests, if there's no error, there's nothing more
-	 * to do until the response arrives. If an error occurred
-	 * attempting to send it, record that as the result of
-	 * the operation and schedule its completion.
+	 * For requests, if there's no error and the operation is not
+	 * unidirectional, there's nothing more to do until the response
+	 * arrives. If an error occurred attempting to send it, or if the
+	 * operation is unidirectional, record the result of the operation and
+	 * schedule its completion.
*/ - operation = message->operation; if (message == operation->response) { if (status) { - dev_err(&operation->connection->dev, - "error sending response: %d\n", status); + dev_err(&connection->hd->dev, + "%s: error sending response 0x%02x: %d\n", + connection->name, operation->type, status); } + gb_operation_put_active(operation); gb_operation_put(operation); - } else if (status) { - if (gb_operation_result_set(operation, status)) - queue_work(gb_operation_workqueue, &operation->work); + } else if (status || gb_operation_is_unidirectional(operation)) { + if (gb_operation_result_set(operation, status)) { + queue_work(gb_operation_completion_wq, + &operation->work); + } } } EXPORT_SYMBOL_GPL(greybus_message_sent); @@ -764,27 +893,30 @@ static void gb_connection_recv_request(struct gb_connection *connection, void *data, size_t size) { struct gb_operation *operation; + int ret; operation = gb_operation_create_incoming(connection, operation_id, type, data, size); if (!operation) { - dev_err(&connection->dev, "can't create operation\n"); - return; /* XXX Respond with pre-allocated ENOMEM */ + dev_err(&connection->hd->dev, + "%s: can't create incoming operation\n", + connection->name); + return; } - gb_operation_get_active(operation); + ret = gb_operation_get_active(operation); + if (ret) { + gb_operation_put(operation); + return; + } + trace_gb_message_recv_request(operation->request); /* - * Incoming requests are handled by arranging for the - * request handler to be the operation's callback function. - * - * The last thing the handler does is send a response - * message. The initial reference to the operation will be - * dropped when the handler returns. + * The initial reference to the operation will be dropped when the + * request handler returns. */ - operation->callback = gb_operation_request_handle; if (gb_operation_result_set(operation, -EINPROGRESS)) - queue_work(gb_operation_workqueue, &operation->work); + queue_work(connection->wq, &operation->work); } /* @@ -798,33 +930,57 @@ static void gb_connection_recv_request(struct gb_connection *connection, static void gb_connection_recv_response(struct gb_connection *connection, u16 operation_id, u8 result, void *data, size_t size) { + struct gb_operation_msg_hdr *header; struct gb_operation *operation; struct gb_message *message; int errno = gb_operation_status_map(result); size_t message_size; + if (!operation_id) { + dev_err_ratelimited(&connection->hd->dev, + "%s: invalid response id 0 received\n", + connection->name); + return; + } + operation = gb_operation_find_outgoing(connection, operation_id); if (!operation) { - dev_err(&connection->dev, "operation not found\n"); + dev_err_ratelimited(&connection->hd->dev, + "%s: unexpected response id 0x%04x received\n", + connection->name, operation_id); return; } message = operation->response; - message_size = sizeof(*message->header) + message->payload_size; - if (!errno && size != message_size) { - dev_err(&connection->dev, "bad message size (%zu != %zu)\n", - size, message_size); + header = message->header; + message_size = sizeof(*header) + message->payload_size; + if (!errno && size > message_size) { + dev_err_ratelimited(&connection->hd->dev, + "%s: malformed response 0x%02x received (%zu > %zu)\n", + connection->name, header->type, + size, message_size); errno = -EMSGSIZE; + } else if (!errno && size < message_size) { + if (gb_operation_short_response_allowed(operation)) { + message->payload_size = size - sizeof(*header); + } else { + dev_err_ratelimited(&connection->hd->dev, + "%s: 
short response 0x%02x received (%zu < %zu)\n", + connection->name, header->type, + size, message_size); + errno = -EMSGSIZE; + } } + trace_gb_message_recv_response(operation->response); /* We must ignore the payload if a bad status is returned */ if (errno) - size = sizeof(*message->header); + size = sizeof(*header); /* The rest will be handled in work queue context */ if (gb_operation_result_set(operation, errno)) { - memcpy(message->header, data, size); - queue_work(gb_operation_workqueue, &operation->work); + memcpy(header, data, size); + queue_work(gb_operation_completion_wq, &operation->work); } gb_operation_put(operation); @@ -839,17 +995,20 @@ void gb_connection_recv(struct gb_connection *connection, void *data, size_t size) { struct gb_operation_msg_hdr header; + struct device *dev = &connection->hd->dev; size_t msg_size; u16 operation_id; - if (connection->state != GB_CONNECTION_STATE_ENABLED) { - dev_err(&connection->dev, "dropping %zu received bytes\n", - size); + if (connection->state == GB_CONNECTION_STATE_DISABLED || + gb_connection_is_offloaded(connection)) { + dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n", + connection->name, size); return; } if (size < sizeof(header)) { - dev_err(&connection->dev, "message too small\n"); + dev_err_ratelimited(dev, "%s: short message received\n", + connection->name); return; } @@ -857,9 +1016,11 @@ void gb_connection_recv(struct gb_connection *connection, memcpy(&header, data, sizeof(header)); msg_size = le16_to_cpu(header.size); if (size < msg_size) { - dev_err(&connection->dev, - "incomplete message received: 0x%04x (%zu < %zu)\n", - le16_to_cpu(header.operation_id), size, msg_size); + dev_err_ratelimited(dev, + "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n", + connection->name, + le16_to_cpu(header.operation_id), + header.type, size, msg_size); return; /* XXX Should still complete operation */ } @@ -873,34 +1034,62 @@ void gb_connection_recv(struct gb_connection *connection, } /* - * Cancel an operation, and record the given error to indicate why. + * Cancel an outgoing operation synchronously, and record the given error to + * indicate why. */ void gb_operation_cancel(struct gb_operation *operation, int errno) { - if (gb_operation_is_incoming(operation)) { - /* Cancel response if it has been allocated */ - if (!gb_operation_result_set(operation, errno) && - !gb_operation_is_unidirectional(operation)) { - gb_message_cancel(operation->response); - } - } else { - if (gb_operation_result_set(operation, errno)) { - gb_message_cancel(operation->request); - gb_operation_put_active(operation); - gb_operation_put(operation); - } + if (WARN_ON(gb_operation_is_incoming(operation))) + return; + + if (gb_operation_result_set(operation, errno)) { + gb_message_cancel(operation->request); + queue_work(gb_operation_completion_wq, &operation->work); } + trace_gb_message_cancel_outgoing(operation->request); + + atomic_inc(&operation->waiters); + wait_event(gb_operation_cancellation_queue, + !gb_operation_is_active(operation)); + atomic_dec(&operation->waiters); } EXPORT_SYMBOL_GPL(gb_operation_cancel); +/* + * Cancel an incoming operation synchronously. Called during connection tear + * down. + */ +void gb_operation_cancel_incoming(struct gb_operation *operation, int errno) +{ + if (WARN_ON(!gb_operation_is_incoming(operation))) + return; + + if (!gb_operation_is_unidirectional(operation)) { + /* + * Make sure the request handler has submitted the response + * before cancelling it. 
+	 */
+	flush_work(&operation->work);
+	if (!gb_operation_result_set(operation, errno))
+		gb_message_cancel(operation->response);
+	}
+	trace_gb_message_cancel_incoming(operation->response);
+
+	atomic_inc(&operation->waiters);
+	wait_event(gb_operation_cancellation_queue,
+			!gb_operation_is_active(operation));
+	atomic_dec(&operation->waiters);
+}
+
 /**
- * gb_operation_sync: implement a "simple" synchronous gb operation.
+ * gb_operation_sync_timeout() - implement a "simple" synchronous operation
  * @connection: the Greybus connection to send this to
  * @type: the type of operation to send
  * @request: pointer to a memory buffer to copy the request from
  * @request_size: size of @request
  * @response: pointer to a memory buffer to copy the response to
  * @response_size: the size of @response.
+ * @timeout: operation timeout in milliseconds
  *
  * This function implements a simple synchronous Greybus operation. It sends
  * the provided operation request and waits (sleeps) until the corresponding
@@ -915,9 +1104,10 @@ EXPORT_SYMBOL_GPL(gb_operation_cancel);
  *
  * If there is an error, the response buffer is left alone.
  */
-int gb_operation_sync(struct gb_connection *connection, int type,
-		      void *request, int request_size,
-		      void *response, int response_size)
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+			      void *request, int request_size,
+			      void *response, int response_size,
+			      unsigned int timeout)
 {
 	struct gb_operation *operation;
 	int ret;
@@ -935,21 +1125,70 @@ int gb_operation_sync(struct gb_connection *connection, int type,
 	if (request_size)
 		memcpy(operation->request->payload, request, request_size);
 
-	ret = gb_operation_request_send_sync(operation);
+	ret = gb_operation_request_send_sync_timeout(operation, timeout);
 	if (ret) {
-		dev_err(&connection->dev, "synchronous operation failed: %d\n",
-			ret);
+		dev_err(&connection->hd->dev,
+			"%s: synchronous operation of type 0x%02x failed: %d\n",
+			connection->name, type, ret);
 	} else {
 		if (response_size) {
 			memcpy(response, operation->response->payload,
			       response_size);
		}
	}
-	gb_operation_destroy(operation);
+
+	gb_operation_put(operation);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
+
+/**
+ * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
+ * @connection: connection to use
+ * @type: type of operation to send
+ * @request: memory buffer to copy the request from
+ * @request_size: size of @request
+ * @timeout: send timeout in milliseconds
+ *
+ * Initiate a unidirectional operation by sending a request message and
+ * waiting for it to be acknowledged as sent by the host device.
+ *
+ * Note that successful send of a unidirectional operation does not imply that
+ * the request has actually reached the remote end of the connection.
+ */ +int gb_operation_unidirectional_timeout(struct gb_connection *connection, + int type, void *request, int request_size, + unsigned int timeout) +{ + struct gb_operation *operation; + int ret; + + if (request_size && !request) + return -EINVAL; + + operation = gb_operation_create_flags(connection, type, + request_size, 0, + GB_OPERATION_FLAG_UNIDIRECTIONAL, + GFP_KERNEL); + if (!operation) + return -ENOMEM; + + if (request_size) + memcpy(operation->request->payload, request, request_size); + + ret = gb_operation_request_send_sync_timeout(operation, timeout); + if (ret) { + dev_err(&connection->hd->dev, + "%s: unidirectional operation of type 0x%02x failed: %d\n", + connection->name, type, ret); + } + + gb_operation_put(operation); return ret; } -EXPORT_SYMBOL_GPL(gb_operation_sync); +EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout); int __init gb_operation_init(void) { @@ -963,12 +1202,14 @@ int __init gb_operation_init(void) if (!gb_operation_cache) goto err_destroy_message_cache; - gb_operation_workqueue = alloc_workqueue("greybus_operation", 0, 1); - if (!gb_operation_workqueue) - goto err_operation; + gb_operation_completion_wq = alloc_workqueue("greybus_completion", + 0, 0); + if (!gb_operation_completion_wq) + goto err_destroy_operation_cache; return 0; -err_operation: + +err_destroy_operation_cache: kmem_cache_destroy(gb_operation_cache); gb_operation_cache = NULL; err_destroy_message_cache: @@ -980,8 +1221,8 @@ err_destroy_message_cache: void gb_operation_exit(void) { - destroy_workqueue(gb_operation_workqueue); - gb_operation_workqueue = NULL; + destroy_workqueue(gb_operation_completion_wq); + gb_operation_completion_wq = NULL; kmem_cache_destroy(gb_operation_cache); gb_operation_cache = NULL; kmem_cache_destroy(gb_message_cache);
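
For reference, a minimal caller sketch (not part of the patch) showing how a protocol driver might use the gb_operation_sync_timeout() helper introduced above. The request type, payload structures, and 500 ms timeout are hypothetical placeholders; only gb_operation_sync_timeout() and its signature come from the code in this file.

/* Hypothetical caller of gb_operation_sync_timeout(); sketch only. */
#include <linux/types.h>
#include "greybus.h"

#define GB_EXAMPLE_TYPE_STATUS		0x02	/* hypothetical request type */
#define GB_EXAMPLE_TIMEOUT_MS		500	/* hypothetical timeout */

struct gb_example_status_request {
	__u8	id;
} __packed;

struct gb_example_status_response {
	__u8	status;
} __packed;

static int gb_example_get_status(struct gb_connection *connection, u8 id,
				 u8 *status)
{
	struct gb_example_status_request request;
	struct gb_example_status_response response;
	int ret;

	request.id = id;

	/* Blocks until the response arrives or the timeout expires. */
	ret = gb_operation_sync_timeout(connection, GB_EXAMPLE_TYPE_STATUS,
					&request, sizeof(request),
					&response, sizeof(response),
					GB_EXAMPLE_TIMEOUT_MS);
	if (ret)
		return ret;

	*status = response.status;

	return 0;
}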
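Likewise, a sketch of the asynchronous pattern described in the gb_operation_request_send() kernel-doc above, reusing the same hypothetical request type. gb_operation_create_flags(), gb_operation_request_send(), gb_operation_result(), and gb_operation_put() are the interfaces exported by this file; the assumption that the callback drops the creation-time reference follows the convention used by in-tree greybus drivers, not an explicit rule stated in this patch.

/* Hypothetical asynchronous caller; sketch only. */
static void gb_example_callback(struct gb_operation *operation)
{
	/* Runs on the completion workqueue once a result has been set. */
	int ret = gb_operation_result(operation);

	if (ret)
		dev_err(&operation->connection->hd->dev,
			"example operation failed: %d\n", ret);

	/* Drop the reference taken at creation time. */
	gb_operation_put(operation);
}

static int gb_example_send_async(struct gb_connection *connection)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_flags(connection,
				GB_EXAMPLE_TYPE_STATUS,
				sizeof(struct gb_example_status_request),
				sizeof(struct gb_example_status_response),
				0, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	/* On success, completion later invokes gb_example_callback(). */
	ret = gb_operation_request_send(operation, gb_example_callback,
					GFP_KERNEL);
	if (ret)
		gb_operation_put(operation);

	return ret;
}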