greybus: connection: add api to {en,dis}able unipro fct flow
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take an additional reference on the connection. */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);
}
42
/*
 * Drop a reference on the connection; the structure is freed by
 * gb_connection_kref_release() when the last reference is dropped.
 */
static void gb_connection_put(struct gb_connection *connection)
{
	kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
70 /*
71  * Callback from the host driver to let us know that data has been
72  * received on the bundle.
73  */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75                         u8 *data, size_t length)
76 {
77         struct gb_connection *connection;
78
79         connection = gb_connection_hd_find(hd, cport_id);
80         if (!connection) {
81                 dev_err(&hd->dev,
82                         "nonexistent connection (%zu bytes dropped)\n", length);
83                 return;
84         }
85         gb_connection_recv(connection, data, length);
86         gb_connection_put(connection);
87 }
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
90 static void gb_connection_kref_release(struct kref *kref)
91 {
92         struct gb_connection *connection;
93
94         connection = container_of(kref, struct gb_connection, kref);
95
96         kfree(connection);
97 }
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;
	int ret;

	/*
	 * Restrict the ida range to the single requested id, or to the
	 * whole cport range when dynamic allocation (< 0) was asked for.
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&gb_connection_mutex);

	/* A remote interface cport may only be used by one connection. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Reserve the host cport id before allocating the connection. */
	ret = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (ret < 0)
		goto err_unlock;
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_remove_ida;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Single-threaded, unbound workqueue for this connection's work. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the host-device (and bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
224
225 struct gb_connection *
226 gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
227                                         gb_request_handler_t handler)
228 {
229         return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler);
230 }
231
232 struct gb_connection *
233 gb_connection_create_control(struct gb_interface *intf)
234 {
235         return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL);
236 }
237
238 struct gb_connection *
239 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
240                                         gb_request_handler_t handler)
241 {
242         struct gb_interface *intf = bundle->intf;
243
244         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
245                                         handler);
246 }
247 EXPORT_SYMBOL_GPL(gb_connection_create);
248
249 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
250 {
251         struct gb_host_device *hd = connection->hd;
252         int ret;
253
254         if (!hd->driver->cport_enable)
255                 return 0;
256
257         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
258         if (ret) {
259                 dev_err(&hd->dev,
260                         "failed to enable host cport: %d\n", ret);
261                 return ret;
262         }
263
264         return 0;
265 }
266
267 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
268 {
269         struct gb_host_device *hd = connection->hd;
270
271         if (!hd->driver->cport_disable)
272                 return;
273
274         hd->driver->cport_disable(hd, connection->hd_cport_id);
275 }
276
277 static int gb_connection_hd_fct_flow_enable(struct gb_connection *connection)
278 {
279         struct gb_host_device *hd = connection->hd;
280         int ret;
281
282         if (!hd->driver->fct_flow_enable)
283                 return 0;
284
285         ret = hd->driver->fct_flow_enable(hd, connection->hd_cport_id);
286         if (ret) {
287                 dev_err(&hd->dev, "%s: failed to enable FCT flow: %d\n",
288                         connection->name, ret);
289                 return ret;
290         }
291
292         return 0;
293 }
294
295 static void gb_connection_hd_fct_flow_disable(struct gb_connection *connection)
296 {
297         struct gb_host_device *hd = connection->hd;
298
299         if (!hd->driver->fct_flow_disable)
300                 return;
301
302         hd->driver->fct_flow_disable(hd, connection->hd_cport_id);
303 }
304
305 /*
306  * Request the SVC to create a connection from AP's cport to interface's
307  * cport.
308  */
309 static int
310 gb_connection_svc_connection_create(struct gb_connection *connection)
311 {
312         struct gb_host_device *hd = connection->hd;
313         struct gb_interface *intf;
314         int ret;
315
316         if (gb_connection_is_static(connection))
317                 return 0;
318
319         intf = connection->intf;
320         ret = gb_svc_connection_create(hd->svc,
321                         hd->svc->ap_intf_id,
322                         connection->hd_cport_id,
323                         intf->interface_id,
324                         connection->intf_cport_id,
325                         intf->boot_over_unipro);
326         if (ret) {
327                 dev_err(&connection->hd->dev,
328                         "%s: failed to create svc connection: %d\n",
329                         connection->name, ret);
330                 return ret;
331         }
332
333         return 0;
334 }
335
336 static void
337 gb_connection_svc_connection_destroy(struct gb_connection *connection)
338 {
339         if (gb_connection_is_static(connection))
340                 return;
341
342         gb_svc_connection_destroy(connection->hd->svc,
343                                   connection->hd->svc->ap_intf_id,
344                                   connection->hd_cport_id,
345                                   connection->intf->interface_id,
346                                   connection->intf_cport_id);
347 }
348
349 /* Inform Interface about active CPorts */
350 static int gb_connection_control_connected(struct gb_connection *connection)
351 {
352         struct gb_control *control;
353         u16 cport_id = connection->intf_cport_id;
354         int ret;
355
356         if (gb_connection_is_static(connection))
357                 return 0;
358
359         control = connection->intf->control;
360
361         if (connection == control->connection)
362                 return 0;
363
364         ret = gb_control_connected_operation(control, cport_id);
365         if (ret) {
366                 dev_err(&connection->bundle->dev,
367                         "failed to connect cport: %d\n", ret);
368                 return ret;
369         }
370
371         return 0;
372 }
373
374 /* Inform Interface about inactive CPorts */
375 static void
376 gb_connection_control_disconnected(struct gb_connection *connection)
377 {
378         struct gb_control *control;
379         u16 cport_id = connection->intf_cport_id;
380         int ret;
381
382         if (gb_connection_is_static(connection))
383                 return;
384
385         control = connection->intf->control;
386
387         if (connection == control->connection)
388                 return;
389
390         ret = gb_control_disconnected_operation(control, cport_id);
391         if (ret) {
392                 dev_warn(&connection->bundle->dev,
393                          "failed to disconnect cport: %d\n", ret);
394         }
395 }
396
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/*
		 * Pin the operation, then drop the spinlock so the
		 * cancel calls can run without it held.
		 */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Retake the lock before re-checking the list. */
		spin_lock_irq(&connection->lock);
	}
}
424
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		/* Find the next incoming operation, if any remain. */
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/* Pin it across the unlocked cancel below. */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations left; nothing more to flush. */
		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
461
/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		/* Rx can only be enabled when a handler is registered. */
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	/* DISABLED -> ENABLED(_TX): cport first, then the SVC route. */
	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* Full enable requires both a registered handler and rx == true. */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	return 0;

err_svc_destroy:
	/* Unwind in reverse order, cancelling anything already queued. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
521
522 int gb_connection_enable(struct gb_connection *connection)
523 {
524         int ret = 0;
525
526         mutex_lock(&connection->mutex);
527
528         if (connection->state == GB_CONNECTION_STATE_ENABLED)
529                 goto out_unlock;
530
531         ret = _gb_connection_enable(connection, true);
532 out_unlock:
533         mutex_unlock(&connection->mutex);
534
535         return ret;
536 }
537 EXPORT_SYMBOL_GPL(gb_connection_enable);
538
539 int gb_connection_enable_tx(struct gb_connection *connection)
540 {
541         int ret = 0;
542
543         mutex_lock(&connection->mutex);
544
545         if (connection->state == GB_CONNECTION_STATE_ENABLED) {
546                 ret = -EINVAL;
547                 goto out_unlock;
548         }
549
550         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
551                 goto out_unlock;
552
553         ret = _gb_connection_enable(connection, false);
554 out_unlock:
555         mutex_unlock(&connection->mutex);
556
557         return ret;
558 }
559 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
560
561 void gb_connection_disable_rx(struct gb_connection *connection)
562 {
563         mutex_lock(&connection->mutex);
564
565         spin_lock_irq(&connection->lock);
566         if (connection->state != GB_CONNECTION_STATE_ENABLED) {
567                 spin_unlock_irq(&connection->lock);
568                 goto out_unlock;
569         }
570         connection->state = GB_CONNECTION_STATE_ENABLED_TX;
571         gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
572         spin_unlock_irq(&connection->lock);
573
574 out_unlock:
575         mutex_unlock(&connection->mutex);
576 }
577
578 void gb_connection_disable(struct gb_connection *connection)
579 {
580         mutex_lock(&connection->mutex);
581
582         if (connection->state == GB_CONNECTION_STATE_DISABLED)
583                 goto out_unlock;
584
585         gb_connection_control_disconnected(connection);
586
587         spin_lock_irq(&connection->lock);
588         connection->state = GB_CONNECTION_STATE_DISABLED;
589         gb_connection_cancel_operations(connection, -ESHUTDOWN);
590         spin_unlock_irq(&connection->lock);
591
592         gb_connection_svc_connection_destroy(connection);
593         gb_connection_hd_cport_disable(connection);
594
595 out_unlock:
596         mutex_unlock(&connection->mutex);
597 }
598 EXPORT_SYMBOL_GPL(gb_connection_disable);
599
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	struct ida *id_map;

	if (!connection)
		return;

	/* Serialise against concurrent create on the same cport ids. */
	mutex_lock(&gb_connection_mutex);

	/* Unlink from the host-device and bundle lists before teardown. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	/* Release the host cport id for reuse and mark ours invalid. */
	id_map = &connection->hd->cport_id_map;
	ida_simple_remove(id_map, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial kref; freed once the last reference is gone. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
626
627 void gb_connection_latency_tag_enable(struct gb_connection *connection)
628 {
629         struct gb_host_device *hd = connection->hd;
630         int ret;
631
632         if (!hd->driver->latency_tag_enable)
633                 return;
634
635         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
636         if (ret) {
637                 dev_err(&connection->hd->dev,
638                         "%s: failed to enable latency tag: %d\n",
639                         connection->name, ret);
640         }
641 }
642 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
643
644 void gb_connection_latency_tag_disable(struct gb_connection *connection)
645 {
646         struct gb_host_device *hd = connection->hd;
647         int ret;
648
649         if (!hd->driver->latency_tag_disable)
650                 return;
651
652         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
653         if (ret) {
654                 dev_err(&connection->hd->dev,
655                         "%s: failed to disable latency tag: %d\n",
656                         connection->name, ret);
657         }
658 }
659 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);