greybus: connection: add CSD connection flag
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take a reference on a connection; paired with gb_connection_put(). */
static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);
}
42
/* Drop a reference; gb_connection_kref_release() frees on last put. */
static void gb_connection_put(struct gb_connection *connection)
{
        kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
70 /*
71  * Callback from the host driver to let us know that data has been
72  * received on the bundle.
73  */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75                         u8 *data, size_t length)
76 {
77         struct gb_connection *connection;
78
79         connection = gb_connection_hd_find(hd, cport_id);
80         if (!connection) {
81                 dev_err(&hd->dev,
82                         "nonexistent connection (%zu bytes dropped)\n", length);
83                 return;
84         }
85         gb_connection_recv(connection, data, length);
86         gb_connection_put(connection);
87 }
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
90 static void gb_connection_kref_release(struct kref *kref)
91 {
92         struct gb_connection *connection;
93
94         connection = container_of(kref, struct gb_connection, kref);
95
96         kfree(connection);
97 }
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler,
				unsigned long flags)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;
	int ret;

	/*
	 * Pick the ida range for the host cport: the whole cport space for
	 * dynamic allocation, or a single-id range when the caller asked
	 * for a specific cport.
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&gb_connection_mutex);

	/* Refuse a second connection to an interface cport already in use. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Reserve the host cport id. */
	ret = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (ret < 0)
		goto err_unlock;
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_remove_ida;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Dedicated per-connection workqueue (max_active = 1). */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	/*
	 * Keep bundle_links in a sane (empty) state even for bundle-less
	 * connections so that the unconditional list_del() in
	 * gb_connection_destroy() is always safe.
	 */
	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
227
/*
 * Create a static connection: fixed host cport, no remote interface,
 * no bundle and no connection flags.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
					gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
					0);
}
235
/*
 * Create the control connection for an interface: dynamically allocated
 * host cport, interface cport 0, no bundle and no request handler.
 */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, 0);
}
241
242 struct gb_connection *
243 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
244                                         gb_request_handler_t handler)
245 {
246         struct gb_interface *intf = bundle->intf;
247
248         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
249                                         handler, 0);
250 }
251 EXPORT_SYMBOL_GPL(gb_connection_create);
252
253 struct gb_connection *
254 gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
255                                         gb_request_handler_t handler,
256                                         unsigned long flags)
257 {
258         struct gb_interface *intf = bundle->intf;
259
260         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
261                                         handler, flags);
262 }
263 EXPORT_SYMBOL_GPL(gb_connection_create_flags);
264
265 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
266 {
267         struct gb_host_device *hd = connection->hd;
268         int ret;
269
270         if (!hd->driver->cport_enable)
271                 return 0;
272
273         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
274         if (ret) {
275                 dev_err(&hd->dev,
276                         "failed to enable host cport: %d\n", ret);
277                 return ret;
278         }
279
280         return 0;
281 }
282
283 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
284 {
285         struct gb_host_device *hd = connection->hd;
286
287         if (!hd->driver->cport_disable)
288                 return;
289
290         hd->driver->cport_disable(hd, connection->hd_cport_id);
291 }
292
293 static int gb_connection_hd_fct_flow_enable(struct gb_connection *connection)
294 {
295         struct gb_host_device *hd = connection->hd;
296         int ret;
297
298         if (!hd->driver->fct_flow_enable)
299                 return 0;
300
301         ret = hd->driver->fct_flow_enable(hd, connection->hd_cport_id);
302         if (ret) {
303                 dev_err(&hd->dev, "%s: failed to enable FCT flow: %d\n",
304                         connection->name, ret);
305                 return ret;
306         }
307
308         return 0;
309 }
310
311 static void gb_connection_hd_fct_flow_disable(struct gb_connection *connection)
312 {
313         struct gb_host_device *hd = connection->hd;
314
315         if (!hd->driver->fct_flow_disable)
316                 return;
317
318         hd->driver->fct_flow_disable(hd, connection->hd_cport_id);
319 }
320
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	/* Static connections have no SVC-managed link; just enable flow. */
	if (gb_connection_is_static(connection))
		return gb_connection_hd_fct_flow_enable(connection);

	intf = connection->intf;

	/* The ES2/ES3 bootrom requires E2EFC, CSD and CSV to be disabled. */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (intf->boot_over_unipro) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
			hd->svc->ap_intf_id,
			connection->hd_cport_id,
			intf->interface_id,
			connection->intf_cport_id,
			cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	/* Tear the SVC connection back down if flow enable fails. */
	ret = gb_connection_hd_fct_flow_enable(connection);
	if (ret) {
		gb_svc_connection_destroy(hd->svc, hd->svc->ap_intf_id,
					  connection->hd_cport_id,
					  intf->interface_id,
					  connection->intf_cport_id);
		return ret;
	}

	return 0;
}
371
372 static void
373 gb_connection_svc_connection_destroy(struct gb_connection *connection)
374 {
375         gb_connection_hd_fct_flow_disable(connection);
376
377         if (gb_connection_is_static(connection))
378                 return;
379
380         gb_svc_connection_destroy(connection->hd->svc,
381                                   connection->hd->svc->ap_intf_id,
382                                   connection->hd_cport_id,
383                                   connection->intf->interface_id,
384                                   connection->intf_cport_id);
385 }
386
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	/* Static connections have no remote interface to notify. */
	if (gb_connection_is_static(connection))
		return 0;

	control = connection->intf->control;

	/* The control connection itself is not announced. */
	if (connection == control->connection)
		return 0;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}
411
/* Inform Interface about inactive CPorts */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	/* Static connections have no remote interface to notify. */
	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	/* The control connection itself is not announced. */
	if (connection == control->connection)
		return;

	/* Teardown path: warn on failure but carry on regardless. */
	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
434
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/*
		 * Pin the operation so it cannot go away while the lock is
		 * dropped around the cancel calls below.
		 */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Retake the lock for the next list_empty() check. */
		spin_lock_irq(&connection->lock);
	}
}
462
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		/* Find the next incoming operation still on the list. */
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/* Pin it before dropping the lock below. */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations remain; leave them alone. */
		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
499
/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		/* Nothing to do unless rx was requested and a handler exists. */
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* RX is only enabled when there is a handler to receive requests. */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	return 0;

err_svc_destroy:
	/* Back out: disable and cancel anything already in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
559
560 int gb_connection_enable(struct gb_connection *connection)
561 {
562         int ret = 0;
563
564         mutex_lock(&connection->mutex);
565
566         if (connection->state == GB_CONNECTION_STATE_ENABLED)
567                 goto out_unlock;
568
569         ret = _gb_connection_enable(connection, true);
570 out_unlock:
571         mutex_unlock(&connection->mutex);
572
573         return ret;
574 }
575 EXPORT_SYMBOL_GPL(gb_connection_enable);
576
577 int gb_connection_enable_tx(struct gb_connection *connection)
578 {
579         int ret = 0;
580
581         mutex_lock(&connection->mutex);
582
583         if (connection->state == GB_CONNECTION_STATE_ENABLED) {
584                 ret = -EINVAL;
585                 goto out_unlock;
586         }
587
588         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
589                 goto out_unlock;
590
591         ret = _gb_connection_enable(connection, false);
592 out_unlock:
593         mutex_unlock(&connection->mutex);
594
595         return ret;
596 }
597 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
598
/* Stop receiving incoming requests; outgoing traffic stays enabled. */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	/* Only a fully enabled connection has RX to disable. */
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Cancels incoming operations; drops/retakes connection->lock. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

out_unlock:
	mutex_unlock(&connection->mutex);
}
615
/* Fully disable a connection, tearing down its SVC link and host cport. */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	/* Tell the remote interface the cport is going inactive. */
	gb_connection_control_disconnected(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	/* Cancels everything in flight; drops/retakes connection->lock. */
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
637
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	struct ida *id_map;

	if (!connection)
		return;

	mutex_lock(&gb_connection_mutex);

	/* Unlink from the host-device and (possibly empty) bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	/* Release the host cport id and poison it against reuse. */
	id_map = &connection->hd->cport_id_map;
	ida_simple_remove(id_map, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial reference; may free the connection. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
664
665 void gb_connection_latency_tag_enable(struct gb_connection *connection)
666 {
667         struct gb_host_device *hd = connection->hd;
668         int ret;
669
670         if (!hd->driver->latency_tag_enable)
671                 return;
672
673         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
674         if (ret) {
675                 dev_err(&connection->hd->dev,
676                         "%s: failed to enable latency tag: %d\n",
677                         connection->name, ret);
678         }
679 }
680 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
681
682 void gb_connection_latency_tag_disable(struct gb_connection *connection)
683 {
684         struct gb_host_device *hd = connection->hd;
685         int ret;
686
687         if (!hd->driver->latency_tag_disable)
688                 return;
689
690         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
691         if (ret) {
692                 dev_err(&connection->hd->dev,
693                         "%s: failed to disable latency tag: %d\n",
694                         connection->name, ret);
695         }
696 }
697 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);