/*
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"
static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);
/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	if (connection->state != GB_CONNECTION_STATE_ENABLED &&
	    connection->state != GB_CONNECTION_STATE_ENABLED_TX &&
	    !gb_operation_is_incoming(operation)) {
		spin_unlock_irqrestore(&connection->lock, flags);
		return -ENOTCONN;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;
}
/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}
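/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of the reference count and the active count around a message send.  The
 * calls shown mirror what gb_operation_request_send() below actually does.
 *
 *	gb_operation_get(operation);		// ref held across the send
 *	ret = gb_operation_get_active(operation);
 *	if (ret)
 *		goto err_put;			// connection going away
 *	ret = gb_message_send(operation->request, gfp);
 *	...
 *	// later, in gb_operation_work(), once the result is final:
 *	gb_operation_put_active(operation);
 *	gb_operation_put(operation);
 */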
static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}
/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}
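/*
 * A minimal sketch (illustration only, not part of the original file) of
 * the errno lifecycle for an outgoing operation:
 *
 *	// op->errno == -EBADR: created, result "never set"
 *	gb_operation_result_set(op, -EINPROGRESS);	// request in flight
 *	gb_operation_result_set(op, 0);		// first result: sticks, true
 *	gb_operation_result_set(op, -EIO);	// too late: returns false
 *	gb_operation_result(op);		// reads 0, the first result
 */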
int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}
static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}
/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}
static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
	}
}
/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation))
		gb_operation_request_handle(operation);
	else
		operation->callback(operation);

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}
static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID.  Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}
/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}
static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}
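/*
 * Worked example of the layout described above (illustration only,
 * assuming a payload of 4 bytes):
 *
 *	message_size     = sizeof(struct gb_operation_msg_hdr) + 4;
 *	message->header  = message->buffer;	// start of the buffer
 *	message->payload = header + 1;		// sizeof(*header) bytes in
 *
 * header->size carries the combined size; a zero payload_size leaves
 * message->payload NULL.
 */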
/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}
/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}
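/*
 * The two maps above are meant to round-trip (illustration only): a
 * handler failing with -ENOMEM puts GB_OP_NO_MEMORY on the wire, and the
 * requester sees -ENOMEM again:
 *
 *	u8 wire = gb_operation_errno_map(-ENOMEM);	// GB_OP_NO_MEMORY
 *	int err = gb_operation_status_map(wire);	// -ENOMEM
 *
 * The trip is not exact for every value: -EMSGSIZE stands for both
 * overflow and underflow, and unrecognized errnos collapse to
 * GB_OP_UNKNOWN_ERROR, which maps back to -EIO.
 */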
bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
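/*
 * Hypothetical usage sketch (not part of the original file): an incoming
 * request handler that returns payload allocates its own response, as
 * described above.  The payload struct and handler name are assumed for
 * illustration.
 *
 *	static int example_handler(struct gb_operation *op)
 *	{
 *		struct example_resp *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *		resp = op->response->payload;
 *		resp->value = cpu_to_le32(42);
 *		return 0;	// core sends the response for us
 *	}
 */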
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags))
			goto err_request;
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;	/* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}
/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	return gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);
size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
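/*
 * Hypothetical usage sketch (not part of the original file): size a
 * request against the host device's limit before creating an outgoing
 * operation.  gb_operation_create() is the flagless wrapper used by
 * gb_operation_sync_timeout() below; the operation type is illustrative.
 *
 *	size_t max = gb_operation_get_payload_size_max(connection);
 *	struct gb_operation *op;
 *
 *	if (request_size > max)
 *		return -E2BIG;
 *	op = gb_operation_create(connection, EXAMPLE_TYPE_ECHO,
 *				 request_size, response_size, GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 */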
static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
						request_size,
						GB_REQUEST_TYPE_INVALID,
						flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);

	return operation;
}
/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);
/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}
/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);
/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}
/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete.  The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback,
				gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_operation_is_active(operation))
		return -EBUSY;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation.  It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
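/*
 * Hypothetical usage sketch (not part of the original file): an
 * asynchronous request with a completion callback.  The callback runs in
 * workqueue context once the final result is set; names are illustrative.
 *
 *	static void example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			example_consume(op->response->payload);
 *		gb_operation_put(op);	// drop the submitter's reference
 *	}
 *
 *	ret = gb_operation_request_send(op, example_callback, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);	// never queued; drop it ourselves
 */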
/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected).  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
						unsigned int timeout)
{
	int ret;
	unsigned long timeout_jiffies;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (timeout)
		timeout_jiffies = msecs_to_jiffies(timeout);
	else
		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	ret = wait_for_completion_interruptible_timeout(&operation->completion,
							timeout_jiffies);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if op timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
					int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
					struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
					&operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
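/*
 * Hypothetical host-driver sketch (not part of the original file): a
 * driver's transmit-complete path reports send status back to the core.
 * The URB framing and the example_hd() helper are assumptions, modeled
 * on a USB-based host driver.
 *
 *	static void example_out_complete(struct urb *urb)
 *	{
 *		struct gb_message *message = urb->context;
 *
 *		greybus_message_sent(example_hd(urb), message, urb->status);
 *	}
 */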
/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       u16 operation_id, u8 type,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}
/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
			u16 operation_id, u8 result, void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gb_message *message;
	int errno = gb_operation_status_map(result);
	size_t message_size;

	if (!operation_id) {
		dev_err(&connection->hd->dev,
			"%s: invalid response id 0 received\n",
			connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: unexpected response id 0x%04x received\n",
			connection->name, operation_id);
		return;
	}

	message = operation->response;
	header = message->header;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err(&connection->hd->dev,
			"%s: malformed response 0x%02x received (%zu > %zu)\n",
			connection->name, header->type,
			size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err(&connection->hd->dev,
				"%s: short response 0x%02x received (%zu < %zu)\n",
				connection->name, header->type,
				size, message_size);
			errno = -EMSGSIZE;
		}
	}
	trace_gb_message_recv_response(operation->response);

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(header, data, size);
		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}
/*
 * Handle data arriving on a connection.  As soon as we return the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;
	u16 operation_id;

	if (connection->state != GB_CONNECTION_STATE_ENABLED &&
	    connection->state != GB_CONNECTION_STATE_ENABLED_TX) {
		dev_warn(dev, "%s: dropping %zu received bytes\n",
				connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err(dev, "%s: short message received\n", connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err(dev,
			"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
			connection->name, le16_to_cpu(header.operation_id),
			header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	operation_id = le16_to_cpu(header.operation_id);
	if (header.type & GB_MESSAGE_TYPE_RESPONSE)
		gb_connection_recv_response(connection, operation_id,
						header.result, data, msg_size);
	else
		gb_connection_recv_request(connection, operation_id,
						header.type, data, msg_size);
}
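/*
 * Hypothetical host-driver sketch (not part of the original file): the
 * receive path hands each complete message buffer to the core, which
 * dispatches to the request or response handler above.  The connection
 * lookup helper is an assumption.
 *
 *	static void example_rx(struct gb_host_device *hd, u16 cport_id,
 *			       void *data, size_t size)
 *	{
 *		struct gb_connection *connection;
 *
 *		connection = example_connection_lookup(hd, cport_id);
 *		if (!connection)
 *			return;		// nobody listening on this cport
 *		gb_connection_recv(connection, data, size);
 *	}
 */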
/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);
/*
 * Cancel an incoming operation synchronously.  Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
				void *request, int request_size,
				void *response, int response_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
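/*
 * Hypothetical usage sketch (not part of the original file): a protocol
 * driver issuing a simple synchronous read.  The request/response structs
 * and operation type are assumed for illustration.
 *
 *	struct example_req req = { .addr = cpu_to_le16(addr) };
 *	struct example_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_READ,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp),
 *					5000);	// timeout in milliseconds
 *	if (ret)
 *		return ret;
 *	value = le32_to_cpu(resp.value);
 */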
/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
				int type, void *request, int request_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					request_size, 0,
					GB_OPERATION_FLAG_UNIDIRECTIONAL,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
				0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}
void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}