greybus: operation: clean up operation work function
cascardo/linux.git: drivers/staging/greybus/operation.c
1 /*
2  * Greybus operations
3  *
4  * Copyright 2014-2015 Google Inc.
5  * Copyright 2014-2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/wait.h>
15 #include <linux/workqueue.h>
16
17 #include "greybus.h"
18
19 /* The default amount of time a request is given to complete */
20 #define OPERATION_TIMEOUT_DEFAULT       1000    /* milliseconds */
21
22 static struct kmem_cache *gb_operation_cache;
23 static struct kmem_cache *gb_message_cache;
24
25 /* Workqueue to handle Greybus operation completions. */
26 static struct workqueue_struct *gb_operation_workqueue;
27
28 /* Wait queue for synchronous cancellations. */
29 static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
30
31 /*
32  * Protects access to connection operations lists, as well as
33  * updates to operation->errno.
34  */
35 static DEFINE_SPINLOCK(gb_operations_lock);
36
37 static int gb_operation_response_send(struct gb_operation *operation,
38                                         int errno);
39
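
/*
 * The "active" count below tracks in-flight request/response activity
 * for an operation, separately from its kref.  Users that bump it with
 * gb_operation_get_active() must balance it with
 * gb_operation_put_active(); gb_operation_cancel() waits on
 * gb_operation_cancellation_queue until the count drops to zero.
 */
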
40 /* Caller holds operation reference. */
41 static inline void gb_operation_get_active(struct gb_operation *operation)
42 {
43         atomic_inc(&operation->active);
44 }
45
46 /* Caller holds operation reference. */
47 static inline void gb_operation_put_active(struct gb_operation *operation)
48 {
49         if (atomic_dec_and_test(&operation->active)) {
50                 if (atomic_read(&operation->waiters))
51                         wake_up(&gb_operation_cancellation_queue);
52         }
53 }
54
55 static inline bool gb_operation_is_active(struct gb_operation *operation)
56 {
57         return atomic_read(&operation->active);
58 }
59
60 /*
61  * Set an operation's result.
62  *
63  * Initially an outgoing operation's errno value is -EBADR.
64  * If no error occurs before sending the request message the only
65  * valid value operation->errno can be set to is -EINPROGRESS,
66  * indicating the request has been (or rather is about to be) sent.
67  * At that point nobody should be looking at the result until the
68  * response arrives.
69  *
70  * The first time the result gets set after the request has been
71  * sent, that result "sticks."  That is, if two concurrent threads
72  * race to set the result, the first one wins.  The return value
73  * tells the caller whether its result was recorded; if not the
74  * caller has nothing more to do.
75  *
76  * The result value -EILSEQ is reserved to signal an implementation
77  * error; if it's ever observed, the code performing the request has
78  * done something fundamentally wrong.  It is an error to try to set
79  * the result to -EBADR, and attempts to do so result in a warning,
80  * and -EILSEQ is used instead.  Similarly, the only valid result
81  * value to set for an operation in initial state is -EINPROGRESS.
82  * Attempts to do otherwise will also record a (successful) -EILSEQ
83  * operation result.
84  */
85 static bool gb_operation_result_set(struct gb_operation *operation, int result)
86 {
87         unsigned long flags;
88         int prev;
89
90         if (result == -EINPROGRESS) {
91                 /*
92                  * -EINPROGRESS is used to indicate the request is
93                  * in flight.  It should be the first result value
94                  * set after the initial -EBADR.  Issue a warning
95                  * and record an implementation error if it's
96                  * set at any other time.
97                  */
98                 spin_lock_irqsave(&gb_operations_lock, flags);
99                 prev = operation->errno;
100                 if (prev == -EBADR)
101                         operation->errno = result;
102                 else
103                         operation->errno = -EILSEQ;
104                 spin_unlock_irqrestore(&gb_operations_lock, flags);
105                 WARN_ON(prev != -EBADR);
106
107                 return true;
108         }
109
110         /*
111          * The first result value set after a request has been sent
112          * will be the final result of the operation.  Subsequent
113          * attempts to set the result are ignored.
114          *
115          * Note that -EBADR is a reserved "initial state" result
116          * value.  Attempts to set this value result in a warning,
117          * and the result code is set to -EILSEQ instead.
118          */
119         if (WARN_ON(result == -EBADR))
120                 result = -EILSEQ; /* Nobody should be setting -EBADR */
121
122         spin_lock_irqsave(&gb_operations_lock, flags);
123         prev = operation->errno;
124         if (prev == -EINPROGRESS)
125                 operation->errno = result;      /* First and final result */
126         spin_unlock_irqrestore(&gb_operations_lock, flags);
127
128         return prev == -EINPROGRESS;
129 }
130
131 int gb_operation_result(struct gb_operation *operation)
132 {
133         int result = operation->errno;
134
135         WARN_ON(result == -EBADR);
136         WARN_ON(result == -EINPROGRESS);
137
138         return result;
139 }
140 EXPORT_SYMBOL_GPL(gb_operation_result);
141
142 /*
143  * Looks up an outgoing operation on a connection and returns a refcounted
144  * pointer if found, or NULL otherwise.
145  */
146 static struct gb_operation *
147 gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
148 {
149         struct gb_operation *operation;
150         unsigned long flags;
151         bool found = false;
152
153         spin_lock_irqsave(&gb_operations_lock, flags);
154         list_for_each_entry(operation, &connection->operations, links)
155                 if (operation->id == operation_id &&
156                                 !gb_operation_is_incoming(operation)) {
157                         gb_operation_get(operation);
158                         found = true;
159                         break;
160                 }
161         spin_unlock_irqrestore(&gb_operations_lock, flags);
162
163         return found ? operation : NULL;
164 }
165
166 static int gb_message_send(struct gb_message *message, gfp_t gfp)
167 {
168         struct gb_connection *connection = message->operation->connection;
169
170         return connection->hd->driver->message_send(connection->hd,
171                                         connection->hd_cport_id,
172                                         message,
173                                         gfp);
174 }
175
176 /*
177  * Cancel a message we have passed to the host device layer to be sent.
178  */
179 static void gb_message_cancel(struct gb_message *message)
180 {
181         struct greybus_host_device *hd = message->operation->connection->hd;
182
183         hd->driver->message_cancel(message);
184 }
185
186 static void gb_operation_request_handle(struct gb_operation *operation)
187 {
188         struct gb_protocol *protocol = operation->connection->protocol;
189         int status;
190         int ret;
191
192         if (!protocol)
193                 return;
194
195         if (protocol->request_recv) {
196                 status = protocol->request_recv(operation->type, operation);
197         } else {
198                 dev_err(&operation->connection->dev,
199                         "unexpected incoming request type 0x%02hhx\n",
200                         operation->type);
201
202                 status = -EPROTONOSUPPORT;
203         }
204
205         ret = gb_operation_response_send(operation, status);
206         if (ret) {
207                 dev_err(&operation->connection->dev,
208                         "failed to send response %d: %d\n",
209                         status, ret);
210                 return;
211         }
212 }
213
214 /*
215  * Process operation work.
216  *
217  * For incoming requests, call the protocol request handler. The operation
218  * result should be -EINPROGRESS at this point.
219  *
220  * For outgoing requests, the operation result value should have
221  * been set before queueing this.  The operation callback function
222  * allows the original requester to know the request has completed
223  * and its result is available.
224  */
225 static void gb_operation_work(struct work_struct *work)
226 {
227         struct gb_operation *operation;
228
229         operation = container_of(work, struct gb_operation, work);
230
231         if (gb_operation_is_incoming(operation))
232                 gb_operation_request_handle(operation);
233         else
234                 operation->callback(operation);
235
236         gb_operation_put_active(operation);
237         gb_operation_put(operation);
238 }
239
240 static void gb_operation_message_init(struct greybus_host_device *hd,
241                                 struct gb_message *message, u16 operation_id,
242                                 size_t payload_size, u8 type)
243 {
244         struct gb_operation_msg_hdr *header;
245
246         header = message->buffer;
247
248         message->header = header;
249         message->payload = payload_size ? header + 1 : NULL;
250         message->payload_size = payload_size;
251
252         /*
253          * The type supplied for incoming message buffers will be
254          * 0x00.  Such buffers will be overwritten by arriving data
255          * so there's no need to initialize the message header.
256          */
257         if (type != GB_OPERATION_TYPE_INVALID) {
258                 u16 message_size = (u16)(sizeof(*header) + payload_size);
259
260                 /*
261                  * For a request, the operation id gets filled in
262                  * when the message is sent.  For a response, it
263                  * will be copied from the request by the caller.
264                  *
265                  * The result field in a request message must be
266                  * zero.  It will be set just prior to sending for
267                  * a response.
268                  */
269                 header->size = cpu_to_le16(message_size);
270                 header->operation_id = 0;
271                 header->type = type;
272                 header->result = 0;
273         }
274 }
275
276 /*
277  * Allocate a message to be used for an operation request or response.
278  * Both types of message contain a common header.  The request message
279  * for an outgoing operation is outbound, as is the response message
280  * for an incoming operation.  The message header for an outbound
281  * message is partially initialized here.
282  *
283  * The headers for inbound messages don't need to be initialized;
284  * they'll be filled in by arriving data.
285  *
286  * Our message buffers have the following layout:
287  *      message header  \_ these combined are
288  *      message payload /  the message size
289  */
290 static struct gb_message *
291 gb_operation_message_alloc(struct greybus_host_device *hd, u8 type,
292                                 size_t payload_size, gfp_t gfp_flags)
293 {
294         struct gb_message *message;
295         struct gb_operation_msg_hdr *header;
296         size_t message_size = payload_size + sizeof(*header);
297
298         if (message_size > hd->buffer_size_max) {
299                 pr_warn("requested message size too big (%zu > %zu)\n",
300                                 message_size, hd->buffer_size_max);
301                 return NULL;
302         }
303
304         /* Allocate the message structure and buffer. */
305         message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
306         if (!message)
307                 return NULL;
308
309         message->buffer = kzalloc(message_size, gfp_flags);
310         if (!message->buffer)
311                 goto err_free_message;
312
313         /* Initialize the message.  Operation id is filled in later. */
314         gb_operation_message_init(hd, message, 0, payload_size, type);
315
316         return message;
317
318 err_free_message:
319         kmem_cache_free(gb_message_cache, message);
320
321         return NULL;
322 }
323
324 static void gb_operation_message_free(struct gb_message *message)
325 {
326         kfree(message->buffer);
327         kmem_cache_free(gb_message_cache, message);
328 }
329
330 /*
331  * Map an enum gb_operation_status value (which is represented in a
332  * message as a single byte) to an appropriate Linux negative errno.
333  */
334 static int gb_operation_status_map(u8 status)
335 {
336         switch (status) {
337         case GB_OP_SUCCESS:
338                 return 0;
339         case GB_OP_INTERRUPTED:
340                 return -EINTR;
341         case GB_OP_TIMEOUT:
342                 return -ETIMEDOUT;
343         case GB_OP_NO_MEMORY:
344                 return -ENOMEM;
345         case GB_OP_PROTOCOL_BAD:
346                 return -EPROTONOSUPPORT;
347         case GB_OP_OVERFLOW:
348                 return -EMSGSIZE;
349         case GB_OP_INVALID:
350                 return -EINVAL;
351         case GB_OP_RETRY:
352                 return -EAGAIN;
353         case GB_OP_NONEXISTENT:
354                 return -ENODEV;
355         case GB_OP_MALFUNCTION:
356                 return -EILSEQ;
357         case GB_OP_UNKNOWN_ERROR:
358         default:
359                 return -EIO;
360         }
361 }
362
363 /*
364  * Map a Linux errno value (from operation->errno) into the value
365  * that should represent it in a response message status sent
366  * over the wire.  Returns an enum gb_operation_status value (which
367  * is represented in a message as a single byte).
368  */
369 static u8 gb_operation_errno_map(int errno)
370 {
371         switch (errno) {
372         case 0:
373                 return GB_OP_SUCCESS;
374         case -EINTR:
375                 return GB_OP_INTERRUPTED;
376         case -ETIMEDOUT:
377                 return GB_OP_TIMEOUT;
378         case -ENOMEM:
379                 return GB_OP_NO_MEMORY;
380         case -EPROTONOSUPPORT:
381                 return GB_OP_PROTOCOL_BAD;
382         case -EMSGSIZE:
383                 return GB_OP_OVERFLOW;  /* Could be underflow too */
384         case -EINVAL:
385                 return GB_OP_INVALID;
386         case -EAGAIN:
387                 return GB_OP_RETRY;
388         case -EILSEQ:
389                 return GB_OP_MALFUNCTION;
390         case -ENODEV:
391                 return GB_OP_NONEXISTENT;
392         case -EIO:
393         default:
394                 return GB_OP_UNKNOWN_ERROR;
395         }
396 }
397
398 bool gb_operation_response_alloc(struct gb_operation *operation,
399                                         size_t response_size)
400 {
401         struct greybus_host_device *hd = operation->connection->hd;
402         struct gb_operation_msg_hdr *request_header;
403         struct gb_message *response;
404         u8 type;
405
406         type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
407         response = gb_operation_message_alloc(hd, type, response_size,
408                                                 GFP_KERNEL);
409         if (!response)
410                 return false;
411         response->operation = operation;
412
413         /*
414          * Size and type get initialized when the message is
415          * allocated.  The errno will be set before sending.  All
416          * that's left is the operation id, which we copy from the
417          * request message header (as-is, in little-endian order).
418          */
419         request_header = operation->request->header;
420         response->header->operation_id = request_header->operation_id;
421         operation->response = response;
422
423         return true;
424 }
425 EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
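
/*
 * A typical use from an incoming-request handler that returns payload
 * might look like this (hypothetical handler, shown for illustration
 * only; the names are not part of this file):
 *
 *	if (!gb_operation_response_alloc(operation, sizeof(*response)))
 *		return -ENOMEM;
 *	response = operation->response->payload;
 *	response->value = cpu_to_le32(value);
 *	return 0;
 *
 * The framework then sends the response, filling in the status, via
 * gb_operation_response_send() when the handler returns.
 */
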
426
427 /*
428  * Create a Greybus operation to be sent over the given connection.
429  * The request buffer will be big enough for a payload of the given
430  * size.
431  *
432  * For outgoing requests, the request message's header will be
433  * initialized with the type of the request and the message size.
434  * Outgoing operations must also specify the response buffer size,
435  * which must be sufficient to hold all expected response data.  The
436  * response message header will eventually be overwritten, so there's
437  * no need to initialize it here.
438  *
439  * Request messages for incoming operations can arrive in interrupt
440  * context, so they must be allocated with GFP_ATOMIC.  In this case
441  * the request buffer will be immediately overwritten, so there is
442  * no need to initialize the message header.  Responsibility for
443  * allocating a response buffer lies with the incoming request
444  * handler for a protocol.  So we don't allocate that here.
445  *
446  * Returns a pointer to the new operation or a null pointer if an
447  * error occurs.
448  */
449 static struct gb_operation *
450 gb_operation_create_common(struct gb_connection *connection, u8 type,
451                                 size_t request_size, size_t response_size,
452                                 unsigned long op_flags, gfp_t gfp_flags)
453 {
454         struct greybus_host_device *hd = connection->hd;
455         struct gb_operation *operation;
456         unsigned long flags;
457
458         operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
459         if (!operation)
460                 return NULL;
461         operation->connection = connection;
462
463         operation->request = gb_operation_message_alloc(hd, type, request_size,
464                                                         gfp_flags);
465         if (!operation->request)
466                 goto err_cache;
467         operation->request->operation = operation;
468
469         /* Allocate the response buffer for outgoing operations */
470         if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
471                 if (!gb_operation_response_alloc(operation, response_size))
472                         goto err_request;
473         }
474
475         operation->flags = op_flags;
476         operation->type = type;
477         operation->errno = -EBADR;  /* Initial value--means "never set" */
478
479         INIT_WORK(&operation->work, gb_operation_work);
480         init_completion(&operation->completion);
481         kref_init(&operation->kref);
482         atomic_set(&operation->active, 0);
483         atomic_set(&operation->waiters, 0);
484
485         spin_lock_irqsave(&gb_operations_lock, flags);
486         list_add_tail(&operation->links, &connection->operations);
487         spin_unlock_irqrestore(&gb_operations_lock, flags);
488
489         return operation;
490
491 err_request:
492         gb_operation_message_free(operation->request);
493 err_cache:
494         kmem_cache_free(gb_operation_cache, operation);
495
496         return NULL;
497 }
498
499 /*
500  * Create a new operation associated with the given connection.  The
501  * request and response sizes provided are the number of bytes
502  * required to hold the request/response payload only.  Both of
503  * these are allowed to be 0.  Note that 0x00 is reserved as an
504  * invalid operation type for all protocols, and this is enforced
505  * here.
506  */
507 struct gb_operation *gb_operation_create(struct gb_connection *connection,
508                                         u8 type, size_t request_size,
509                                         size_t response_size,
510                                         gfp_t gfp)
511 {
512         if (WARN_ON_ONCE(type == GB_OPERATION_TYPE_INVALID))
513                 return NULL;
514         if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
515                 type &= ~GB_MESSAGE_TYPE_RESPONSE;
516
517         return gb_operation_create_common(connection, type,
518                                         request_size, response_size, 0, gfp);
519 }
520 EXPORT_SYMBOL_GPL(gb_operation_create);
521
522 size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
523 {
524         struct greybus_host_device *hd = connection->hd;
525
526         return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
527 }
528 EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
529
530 static struct gb_operation *
531 gb_operation_create_incoming(struct gb_connection *connection, u16 id,
532                                 u8 type, void *data, size_t size)
533 {
534         struct gb_operation *operation;
535         size_t request_size;
536         unsigned long flags = GB_OPERATION_FLAG_INCOMING;
537
538         /* Caller has made sure we at least have a message header. */
539         request_size = size - sizeof(struct gb_operation_msg_hdr);
540
541         if (!id)
542                 flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
543
544         operation = gb_operation_create_common(connection, type,
545                                         request_size, 0, flags, GFP_ATOMIC);
546         if (!operation)
547                 return NULL;
548
549         operation->id = id;
550         memcpy(operation->request->header, data, size);
551
552         return operation;
553 }
554
555 /*
556  * Get an additional reference on an operation.
557  */
558 void gb_operation_get(struct gb_operation *operation)
559 {
560         kref_get(&operation->kref);
561 }
562 EXPORT_SYMBOL_GPL(gb_operation_get);
563
564 /*
565  * Destroy a previously created operation.
566  */
567 static void _gb_operation_destroy(struct kref *kref)
568 {
569         struct gb_operation *operation;
570
571         operation = container_of(kref, struct gb_operation, kref);
572
573         /* XXX Make sure it's not in flight */
574         list_del(&operation->links);
575         spin_unlock(&gb_operations_lock);
576
577         if (operation->response)
578                 gb_operation_message_free(operation->response);
579         gb_operation_message_free(operation->request);
580
581         kmem_cache_free(gb_operation_cache, operation);
582 }
583
584 /*
585  * Drop a reference on an operation, and destroy it when the last
586  * one is gone.
587  */
588 void gb_operation_put(struct gb_operation *operation)
589 {
590         if (WARN_ON(!operation))
591                 return;
592
593         kref_put_spinlock_irqsave(&operation->kref, _gb_operation_destroy,
594                         &gb_operations_lock);
595 }
596 EXPORT_SYMBOL_GPL(gb_operation_put);
597
598 /* Tell the requester we're done */
599 static void gb_operation_sync_callback(struct gb_operation *operation)
600 {
601         complete(&operation->completion);
602 }
603
604 /*
605  * Send an operation request message. The caller has filled in any payload so
606  * the request message is ready to go. The callback function supplied will be
607  * called when the response message has arrived indicating the operation is
608  * complete. In that case, the callback function is responsible for fetching
609  * the result of the operation using gb_operation_result() if desired, and
610  * dropping the initial reference to the operation.
611  */
612 int gb_operation_request_send(struct gb_operation *operation,
613                                 gb_operation_callback callback,
614                                 gfp_t gfp)
615 {
616         struct gb_connection *connection = operation->connection;
617         struct gb_operation_msg_hdr *header;
618         unsigned int cycle;
619         int ret;
620
621         if (!callback)
622                 return -EINVAL;
623
624         if (connection->state != GB_CONNECTION_STATE_ENABLED)
625                 return -ENOTCONN;
626
627         /*
628          * First, get an extra reference on the operation.
629          * It'll be dropped when the operation completes.
630          */
631         gb_operation_get(operation);
632         gb_operation_get_active(operation);
633
634         /*
635          * Record the callback function, which is executed in
636          * non-atomic (workqueue) context when the final result
637          * of an operation has been set.
638          */
639         operation->callback = callback;
640
641         /*
642          * Assign the operation's id, and store it in the request header.
643          * Zero is a reserved operation id.
644          */
645         cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
646         operation->id = (u16)(cycle % U16_MAX + 1);
647         header = operation->request->header;
648         header->operation_id = cpu_to_le16(operation->id);
649
650         /* All set, send the request */
651         gb_operation_result_set(operation, -EINPROGRESS);
652
653         ret = gb_message_send(operation->request, gfp);
654         if (ret) {
655                 gb_operation_put_active(operation);
656                 gb_operation_put(operation);
657         }
658
659         return ret;
660 }
661 EXPORT_SYMBOL_GPL(gb_operation_request_send);
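
/*
 * Sketch of asynchronous use (hypothetical caller, for illustration
 * only), following the contract described above:
 *
 *	static void example_callback(struct gb_operation *operation)
 *	{
 *		int ret = gb_operation_result(operation);
 *
 *		if (!ret)
 *			process(operation->response->payload);
 *		gb_operation_put(operation);
 *	}
 *
 *	operation = gb_operation_create(connection, type, request_size,
 *					response_size, GFP_KERNEL);
 *	ret = gb_operation_request_send(operation, example_callback,
 *					GFP_KERNEL);
 */
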
662
663 /*
664  * Send a synchronous operation.  This function is expected to
665  * block, returning only when the response has arrived (or when an
666  * error is detected).  The return value is the result of the
667  * operation.
668  */
669 int gb_operation_request_send_sync(struct gb_operation *operation)
670 {
671         int ret;
672         unsigned long timeout;
673
674         ret = gb_operation_request_send(operation, gb_operation_sync_callback,
675                                         GFP_KERNEL);
676         if (ret)
677                 return ret;
678
679         timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
680         ret = wait_for_completion_interruptible_timeout(&operation->completion, timeout);
681         if (ret < 0) {
682                 /* Cancel the operation if interrupted */
683                 gb_operation_cancel(operation, -ECANCELED);
684         } else if (ret == 0) {
685                 /* Cancel the operation if op timed out */
686                 gb_operation_cancel(operation, -ETIMEDOUT);
687         }
688
689         return gb_operation_result(operation);
690 }
691 EXPORT_SYMBOL_GPL(gb_operation_request_send_sync);
692
693 /*
694  * Send a response for an incoming operation request.  A non-zero
695  * errno indicates a failed operation.
696  *
697  * If there is any response payload, the incoming request handler is
698  * responsible for allocating the response message.  Otherwise the
699  * handler can simply supply the result errno; this function will
700  * allocate the response message if necessary.
701  */
702 static int gb_operation_response_send(struct gb_operation *operation,
703                                         int errno)
704 {
705         struct gb_connection *connection = operation->connection;
706         int ret;
707
708         if (!operation->response &&
709                         !gb_operation_is_unidirectional(operation)) {
710                 if (!gb_operation_response_alloc(operation, 0))
711                         return -ENOMEM;
712         }
713
714         /* Record the result */
715         if (!gb_operation_result_set(operation, errno)) {
716                 dev_err(&connection->dev, "request result already set\n");
717                 return -EIO;    /* Shouldn't happen */
718         }
719
720         /* Sender of request does not care about response. */
721         if (gb_operation_is_unidirectional(operation))
722                 return 0;
723
724         /* Reference will be dropped when message has been sent. */
725         gb_operation_get(operation);
726         gb_operation_get_active(operation);
727
728         /* Fill in the response header and send it */
729         operation->response->header->result = gb_operation_errno_map(errno);
730
731         ret = gb_message_send(operation->response, GFP_KERNEL);
732         if (ret) {
733                 gb_operation_put_active(operation);
734                 gb_operation_put(operation);
735         }
736
737         return ret;
738 }
739
740 /*
741  * This function is called when a message send request has completed.
742  */
743 void greybus_message_sent(struct greybus_host_device *hd,
744                                         struct gb_message *message, int status)
745 {
746         struct gb_operation *operation;
747
748         /*
749          * If the message was a response, we just need to drop our
750          * reference to the operation.  If an error occurred, report
751          * it.
752          *
753          * For requests, if there's no error, there's nothing more
754          * to do until the response arrives.  If an error occurred
755          * attempting to send it, record that as the result of
756          * the operation and schedule its completion.
757          */
758         operation = message->operation;
759         if (message == operation->response) {
760                 if (status) {
761                         dev_err(&operation->connection->dev,
762                                 "error sending response: %d\n", status);
763                 }
764                 gb_operation_put_active(operation);
765                 gb_operation_put(operation);
766         } else if (status) {
767                 if (gb_operation_result_set(operation, status))
768                         queue_work(gb_operation_workqueue, &operation->work);
769         }
770 }
771 EXPORT_SYMBOL_GPL(greybus_message_sent);
772
773 /*
774  * We've received data on a connection, and it doesn't look like a
775  * response, so we assume it's a request.
776  *
777  * This is called in interrupt context, so just copy the incoming
778  * data into the request buffer and handle the rest via workqueue.
779  */
780 static void gb_connection_recv_request(struct gb_connection *connection,
781                                        u16 operation_id, u8 type,
782                                        void *data, size_t size)
783 {
784         struct gb_operation *operation;
785
786         operation = gb_operation_create_incoming(connection, operation_id,
787                                                 type, data, size);
788         if (!operation) {
789                 dev_err(&connection->dev, "can't create operation\n");
790                 return;         /* XXX Respond with pre-allocated ENOMEM */
791         }
792
793         gb_operation_get_active(operation);
794
795         /*
796          * The initial reference to the operation will be dropped when the
797          * request handler returns.
798          */
799         if (gb_operation_result_set(operation, -EINPROGRESS))
800                 queue_work(gb_operation_workqueue, &operation->work);
801 }
802
803 /*
804  * We've received data that appears to be an operation response
805  * message.  Look up the operation, and record that we've received
806  * its response.
807  *
808  * This is called in interrupt context, so just copy the incoming
809  * data into the response buffer and handle the rest via workqueue.
810  */
811 static void gb_connection_recv_response(struct gb_connection *connection,
812                         u16 operation_id, u8 result, void *data, size_t size)
813 {
814         struct gb_operation *operation;
815         struct gb_message *message;
816         int errno = gb_operation_status_map(result);
817         size_t message_size;
818
819         operation = gb_operation_find_outgoing(connection, operation_id);
820         if (!operation) {
821                 dev_err(&connection->dev, "operation not found\n");
822                 return;
823         }
824
825         message = operation->response;
826         message_size = sizeof(*message->header) + message->payload_size;
827         if (!errno && size != message_size) {
828                 dev_err(&connection->dev, "bad message size (%zu != %zu)\n",
829                         size, message_size);
830                 errno = -EMSGSIZE;
831         }
832
833         /* We must ignore the payload if a bad status is returned */
834         if (errno)
835                 size = sizeof(*message->header);
836
837         /* The rest will be handled in work queue context */
838         if (gb_operation_result_set(operation, errno)) {
839                 memcpy(message->header, data, size);
840                 queue_work(gb_operation_workqueue, &operation->work);
841         }
842
843         gb_operation_put(operation);
844 }
845
846 /*
847  * Handle data arriving on a connection.  As soon as we return the
848  * supplied data buffer will be reused (so unless we do something
849  * with it, it's effectively dropped).
850  */
851 void gb_connection_recv(struct gb_connection *connection,
852                                 void *data, size_t size)
853 {
854         struct gb_operation_msg_hdr header;
855         size_t msg_size;
856         u16 operation_id;
857
858         if (connection->state != GB_CONNECTION_STATE_ENABLED) {
859                 dev_err(&connection->dev, "dropping %zu received bytes\n",
860                         size);
861                 return;
862         }
863
864         if (size < sizeof(header)) {
865                 dev_err(&connection->dev, "message too small\n");
866                 return;
867         }
868
869         /* Use memcpy as data may be unaligned */
870         memcpy(&header, data, sizeof(header));
871         msg_size = le16_to_cpu(header.size);
872         if (size < msg_size) {
873                 dev_err(&connection->dev,
874                         "incomplete message received: 0x%04x (%zu < %zu)\n",
875                         le16_to_cpu(header.operation_id), size, msg_size);
876                 return;         /* XXX Should still complete operation */
877         }
878
879         operation_id = le16_to_cpu(header.operation_id);
880         if (header.type & GB_MESSAGE_TYPE_RESPONSE)
881                 gb_connection_recv_response(connection, operation_id,
882                                                 header.result, data, msg_size);
883         else
884                 gb_connection_recv_request(connection, operation_id,
885                                                 header.type, data, msg_size);
886 }
887
888 /*
889  * Cancel an operation synchronously, and record the given error to indicate
890  * why.
891  */
892 void gb_operation_cancel(struct gb_operation *operation, int errno)
893 {
894         if (gb_operation_is_incoming(operation)) {
895                 /* Cancel response if it has been allocated */
896                 if (!gb_operation_result_set(operation, errno) &&
897                                 !gb_operation_is_unidirectional(operation)) {
898                         gb_message_cancel(operation->response);
899                 }
900         } else {
901                 if (gb_operation_result_set(operation, errno)) {
902                         gb_message_cancel(operation->request);
903                         queue_work(gb_operation_workqueue, &operation->work);
904                 }
905         }
906
907         atomic_inc(&operation->waiters);
908         wait_event(gb_operation_cancellation_queue,
909                         !gb_operation_is_active(operation));
910         atomic_dec(&operation->waiters);
911 }
912 EXPORT_SYMBOL_GPL(gb_operation_cancel);
913
914 /**
915  * gb_operation_sync: implement a "simple" synchronous gb operation.
916  * @connection: the Greybus connection to send this to
917  * @type: the type of operation to send
918  * @request: pointer to a memory buffer to copy the request from
919  * @request_size: size of @request
920  * @response: pointer to a memory buffer to copy the response to
921  * @response_size: the size of @response.
922  *
923  * This function implements a simple synchronous Greybus operation.  It sends
924  * the provided operation request and waits (sleeps) until the corresponding
925  * operation response message has been successfully received, or an error
926  * occurs.  @request and @response are buffers to hold the request and response
927  * data respectively, and if they are not NULL, their size must be specified in
928  * @request_size and @response_size.
929  *
930  * If a response payload is to come back, and @response is not NULL,
931  * @response_size number of bytes will be copied into @response if the operation
932  * is successful.
933  *
934  * If there is an error, the response buffer is left alone.
935  */
936 int gb_operation_sync(struct gb_connection *connection, int type,
937                       void *request, int request_size,
938                       void *response, int response_size)
939 {
940         struct gb_operation *operation;
941         int ret;
942
943         if ((response_size && !response) ||
944             (request_size && !request))
945                 return -EINVAL;
946
947         operation = gb_operation_create(connection, type,
948                                         request_size, response_size,
949                                         GFP_KERNEL);
950         if (!operation)
951                 return -ENOMEM;
952
953         if (request_size)
954                 memcpy(operation->request->payload, request, request_size);
955
956         ret = gb_operation_request_send_sync(operation);
957         if (ret) {
958                 dev_err(&connection->dev, "synchronous operation failed: %d\n",
959                         ret);
960         } else {
961                 if (response_size) {
962                         memcpy(response, operation->response->payload,
963                                response_size);
964                 }
965         }
966         gb_operation_destroy(operation);
967
968         return ret;
969 }
970 EXPORT_SYMBOL_GPL(gb_operation_sync);
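
/*
 * Example call (hypothetical protocol driver, for illustration only):
 *
 *	struct gb_example_request request;
 *	struct gb_example_response response;
 *	int ret;
 *
 *	request.which = cpu_to_le16(1);
 *	ret = gb_operation_sync(connection, GB_EXAMPLE_TYPE_GET,
 *				&request, sizeof(request),
 *				&response, sizeof(response));
 *
 * On success, sizeof(response) bytes of response payload have been
 * copied into the response buffer; on failure it is left untouched.
 */
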
971
972 int __init gb_operation_init(void)
973 {
974         gb_message_cache = kmem_cache_create("gb_message_cache",
975                                 sizeof(struct gb_message), 0, 0, NULL);
976         if (!gb_message_cache)
977                 return -ENOMEM;
978
979         gb_operation_cache = kmem_cache_create("gb_operation_cache",
980                                 sizeof(struct gb_operation), 0, 0, NULL);
981         if (!gb_operation_cache)
982                 goto err_destroy_message_cache;
983
984         gb_operation_workqueue = alloc_workqueue("greybus_operation", 0, 1);
985         if (!gb_operation_workqueue)
986                 goto err_operation;
987
988         return 0;
989 err_operation:
990         kmem_cache_destroy(gb_operation_cache);
991         gb_operation_cache = NULL;
992 err_destroy_message_cache:
993         kmem_cache_destroy(gb_message_cache);
994         gb_message_cache = NULL;
995
996         return -ENOMEM;
997 }
998
999 void gb_operation_exit(void)
1000 {
1001         destroy_workqueue(gb_operation_workqueue);
1002         gb_operation_workqueue = NULL;
1003         kmem_cache_destroy(gb_operation_cache);
1004         gb_operation_cache = NULL;
1005         kmem_cache_destroy(gb_message_cache);
1006         gb_message_cache = NULL;
1007 }