greybus: connection: set request handlers at creation
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take an additional reference on @connection. */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);
}
42
/*
 * Drop a reference on @connection; the last put frees it via
 * gb_connection_kref_release().
 */
static void gb_connection_put(struct gb_connection *connection)
{
	kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
70 /*
71  * Callback from the host driver to let us know that data has been
72  * received on the bundle.
73  */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75                         u8 *data, size_t length)
76 {
77         struct gb_connection *connection;
78
79         connection = gb_connection_hd_find(hd, cport_id);
80         if (!connection) {
81                 dev_err(&hd->dev,
82                         "nonexistent connection (%zu bytes dropped)\n", length);
83                 return;
84         }
85         gb_connection_recv(connection, data, length);
86         gb_connection_put(connection);
87 }
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
90 static void gb_connection_kref_release(struct kref *kref)
91 {
92         struct gb_connection *connection;
93
94         connection = container_of(kref, struct gb_connection, kref);
95
96         kfree(connection);
97 }
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;
	int ret;

	/*
	 * Choose the ida search range: the full cport space for dynamic
	 * allocation, or a single-id range to claim a specific cport.
	 * An explicit id beyond the host's cport count is rejected.
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&gb_connection_mutex);

	/* Refuse a second connection to the same remote cport. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Reserve (or claim) the host cport id. */
	ret = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (ret < 0)
		goto err_unlock;
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_remove_ida;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* One single-threaded, unbound workqueue per connection. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish on the host-device (and, if any, bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
224
/*
 * Create a static connection: a fixed host cport with no remote
 * interface or bundle.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
					gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler);
}
231
/*
 * Create the interface's control connection: dynamically allocated host
 * cport, remote cport 0, no bundle and no request handler.
 */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL);
}
237
238 struct gb_connection *
239 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
240                                         gb_request_handler_t handler)
241 {
242         struct gb_interface *intf = bundle->intf;
243
244         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
245                                         handler);
246 }
247 EXPORT_SYMBOL_GPL(gb_connection_create);
248
249 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
250 {
251         struct gb_host_device *hd = connection->hd;
252         int ret;
253
254         if (!hd->driver->cport_enable)
255                 return 0;
256
257         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
258         if (ret) {
259                 dev_err(&hd->dev,
260                         "failed to enable host cport: %d\n", ret);
261                 return ret;
262         }
263
264         return 0;
265 }
266
267 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
268 {
269         struct gb_host_device *hd = connection->hd;
270
271         if (!hd->driver->cport_disable)
272                 return;
273
274         hd->driver->cport_disable(hd, connection->hd_cport_id);
275 }
276
277 /*
278  * Request the SVC to create a connection from AP's cport to interface's
279  * cport.
280  */
281 static int
282 gb_connection_svc_connection_create(struct gb_connection *connection)
283 {
284         struct gb_host_device *hd = connection->hd;
285         struct gb_interface *intf;
286         int ret;
287
288         if (gb_connection_is_static(connection))
289                 return 0;
290
291         intf = connection->intf;
292         ret = gb_svc_connection_create(hd->svc,
293                         hd->svc->ap_intf_id,
294                         connection->hd_cport_id,
295                         intf->interface_id,
296                         connection->intf_cport_id,
297                         intf->boot_over_unipro);
298         if (ret) {
299                 dev_err(&connection->hd->dev,
300                         "%s: failed to create svc connection: %d\n",
301                         connection->name, ret);
302                 return ret;
303         }
304
305         return 0;
306 }
307
308 static void
309 gb_connection_svc_connection_destroy(struct gb_connection *connection)
310 {
311         if (gb_connection_is_static(connection))
312                 return;
313
314         gb_svc_connection_destroy(connection->hd->svc,
315                                   connection->hd->svc->ap_intf_id,
316                                   connection->hd_cport_id,
317                                   connection->intf->interface_id,
318                                   connection->intf_cport_id);
319 }
320
321 /* Inform Interface about active CPorts */
322 static int gb_connection_control_connected(struct gb_connection *connection)
323 {
324         struct gb_control *control;
325         u16 cport_id = connection->intf_cport_id;
326         int ret;
327
328         if (gb_connection_is_static(connection))
329                 return 0;
330
331         control = connection->intf->control;
332
333         if (connection == control->connection)
334                 return 0;
335
336         ret = gb_control_connected_operation(control, cport_id);
337         if (ret) {
338                 dev_err(&connection->bundle->dev,
339                         "failed to connect cport: %d\n", ret);
340                 return ret;
341         }
342
343         return 0;
344 }
345
346 /* Inform Interface about inactive CPorts */
347 static void
348 gb_connection_control_disconnected(struct gb_connection *connection)
349 {
350         struct gb_control *control;
351         u16 cport_id = connection->intf_cport_id;
352         int ret;
353
354         if (gb_connection_is_static(connection))
355                 return;
356
357         control = connection->intf->control;
358
359         if (connection == control->connection)
360                 return;
361
362         ret = gb_control_disconnected_operation(control, cport_id);
363         if (ret) {
364                 dev_warn(&connection->bundle->dev,
365                          "failed to disconnect cport: %d\n", ret);
366         }
367 }
368
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/*
		 * Pin the operation so it cannot go away while the
		 * connection lock is dropped across the cancel calls.
		 */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Retake the lock and re-check the (shrinking) list. */
		spin_lock_irq(&connection->lock);
	}
}
395
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		/* Find the next incoming operation, if any. */
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/*
				 * Pin the operation so it survives the
				 * lock drop below.
				 */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations remain: done. */
		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		/* Retake the lock; the list may have changed meanwhile. */
		spin_lock_irq(&connection->lock);
	}
}
431
/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		/* Without a handler (or rx request) there is nothing to do. */
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	/* DISABLED -> ENABLED(_TX): bring up host cport, then SVC route. */
	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* Rx is only enabled when there is a handler to receive requests. */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	/* Announce the active cport to the remote interface. */
	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	return 0;

err_svc_destroy:
	/* Unwind: back to DISABLED, cancel anything already in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
491
492 int gb_connection_enable(struct gb_connection *connection)
493 {
494         int ret = 0;
495
496         mutex_lock(&connection->mutex);
497
498         if (connection->state == GB_CONNECTION_STATE_ENABLED)
499                 goto out_unlock;
500
501         ret = _gb_connection_enable(connection, true);
502 out_unlock:
503         mutex_unlock(&connection->mutex);
504
505         return ret;
506 }
507 EXPORT_SYMBOL_GPL(gb_connection_enable);
508
509 int gb_connection_enable_tx(struct gb_connection *connection)
510 {
511         int ret = 0;
512
513         mutex_lock(&connection->mutex);
514
515         if (connection->state == GB_CONNECTION_STATE_ENABLED) {
516                 ret = -EINVAL;
517                 goto out_unlock;
518         }
519
520         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
521                 goto out_unlock;
522
523         ret = _gb_connection_enable(connection, false);
524 out_unlock:
525         mutex_unlock(&connection->mutex);
526
527         return ret;
528 }
529 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
530
/* Disable incoming requests, leaving outgoing traffic enabled. */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	/* Only a fully-enabled connection has rx to disable. */
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Drops and retakes connection->lock internally. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

out_unlock:
	mutex_unlock(&connection->mutex);
}
547
/* Fully disable a connection; no-op if already disabled. */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	/* Tell the remote interface the cport is going away first. */
	gb_connection_control_disconnected(connection);

	/* Mark disabled and cancel everything still in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Tear down the SVC route, then the host cport. */
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
569
570 /* Caller must have disabled the connection before destroying it. */
571 void gb_connection_destroy(struct gb_connection *connection)
572 {
573         struct ida *id_map;
574
575         if (!connection)
576                 return;
577
578         mutex_lock(&gb_connection_mutex);
579
580         spin_lock_irq(&gb_connections_lock);
581         list_del(&connection->bundle_links);
582         list_del(&connection->hd_links);
583         spin_unlock_irq(&gb_connections_lock);
584
585         destroy_workqueue(connection->wq);
586
587         id_map = &connection->hd->cport_id_map;
588         ida_simple_remove(id_map, connection->hd_cport_id);
589         connection->hd_cport_id = CPORT_ID_BAD;
590
591         mutex_unlock(&gb_connection_mutex);
592
593         gb_connection_put(connection);
594 }
595 EXPORT_SYMBOL_GPL(gb_connection_destroy);
596
597 void gb_connection_latency_tag_enable(struct gb_connection *connection)
598 {
599         struct gb_host_device *hd = connection->hd;
600         int ret;
601
602         if (!hd->driver->latency_tag_enable)
603                 return;
604
605         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
606         if (ret) {
607                 dev_err(&connection->hd->dev,
608                         "%s: failed to enable latency tag: %d\n",
609                         connection->name, ret);
610         }
611 }
612 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
613
614 void gb_connection_latency_tag_disable(struct gb_connection *connection)
615 {
616         struct gb_host_device *hd = connection->hd;
617         int ret;
618
619         if (!hd->driver->latency_tag_disable)
620                 return;
621
622         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
623         if (ret) {
624                 dev_err(&connection->hd->dev,
625                         "%s: failed to disable latency tag: %d\n",
626                         connection->name, ret);
627         }
628 }
629 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);