greybus: connection: move CPort Buffer configuration out of svc helpers
drivers/staging/greybus/connection.c [cascardo/linux.git]
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"


static void gb_connection_kref_release(struct kref *kref);

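/*
 * gb_connections_lock protects the per-host-device and per-bundle
 * connection lists; gb_connection_mutex serialises connection creation
 * and destruction.
 */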
static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);


/* Caller holds gb_connection_mutex. */
static struct gb_connection *
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return connection;
        }

        return NULL;
}

static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);
}

static void gb_connection_put(struct gb_connection *connection)
{
        kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                        u8 *data, size_t length)
{
        struct gb_connection *connection;

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
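
/*
 * Hypothetical sketch (editorial illustration, not part of this driver):
 * a host-device driver typically calls greybus_data_rcvd() from its
 * receive-completion path, passing the host CPort id the message arrived
 * on. The name example_hd_rx_complete() is made up for illustration.
 */
#if 0
static void example_hd_rx_complete(struct gb_host_device *hd, u16 cport_id,
                                   u8 *data, size_t length)
{
        /*
         * Hand the raw message to greybus core, which looks up the
         * connection by host CPort id and dispatches it.
         */
        greybus_data_rcvd(hd, cport_id, data, length);
}
#endif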

static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        kfree(connection);
}

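/*
 * Connection names have the form "<hd_cport_id>/<intf_id>:<cport_id>",
 * for example "3/1:5". Static connections (no interface) use 0 for both
 * the interface and CPort ids.
 */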
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                        "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                                struct gb_interface *intf,
                                struct gb_bundle *bundle, int cport_id,
                                gb_request_handler_t handler,
                                unsigned long flags)
{
        struct gb_connection *connection;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_intf_find(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        mutex_unlock(&gb_connection_mutex);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                                        gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                        0);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                        GB_CONNECTION_FLAG_CONTROL);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler,
                                        unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                                        unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
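
/*
 * Hypothetical usage sketch (editorial illustration, not part of this
 * driver): a bundle driver would typically create and enable its
 * connection from its probe path and tear it down again on disconnect
 * (see the sketch after gb_connection_destroy() below). The names
 * example_probe() and example_request_handler() are made up; the
 * gb_connection_* calls are the ones defined in this file.
 */
#if 0
static int example_request_handler(struct gb_operation *op)
{
        /* Handle incoming requests arriving on the connection. */
        return 0;
}

static int example_probe(struct gb_bundle *bundle, u16 cport_id)
{
        struct gb_connection *connection;
        int ret;

        connection = gb_connection_create(bundle, cport_id,
                                          example_request_handler);
        if (IS_ERR(connection))
                return PTR_ERR(connection);

        ret = gb_connection_enable(connection);
        if (ret) {
                gb_connection_destroy(connection);
                return ret;
        }

        return 0;
}
#endif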

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev,
                                "%s: failed to enable host cport: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev,
                                "%s: failed to disable host cport: %d\n",
                                connection->name, ret);
        }
}

static int
gb_connection_hd_cport_features_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_features_enable)
                return 0;

        ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_features_disable)
                return;

        hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}

/*
 * Request the SVC to create a connection from the AP's CPort to the
 * interface's CPort.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                                GB_SVC_CPORT_FLAG_E2EFC;
        }
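        /*
         * Resulting combinations: CSV is always disabled (CSV_N); with
         * flow control disabled, CSD is disabled too and E2EFC is left
         * off; with E2EFC requested, E2EFC is enabled and CSD is disabled
         * in its favour; otherwise only CSD remains enabled.
         */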

        ret = gb_svc_connection_create(hd->svc,
                        hd->svc->ap_intf_id,
                        connection->hd_cport_id,
                        intf->interface_id,
                        connection->intf_cport_id,
                        cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        /*
         * HACK: Suppress connected request for the offloaded camera
         * connection as it is currently not supported by firmware. Note that
         * the corresponding non-fatal disconnected event is still sent.
         */
        if (gb_connection_is_offloaded(connection) &&
                        connection->flags & GB_CONNECTION_FLAG_CDSI1) {
                return 0;
        }

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

/* Inform Interface about inactive CPorts */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        if (gb_connection_is_control(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                                int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                                struct gb_operation, links);
                gb_operation_get(operation);
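                /*
                 * Hold a reference and drop the connection lock while
                 * cancelling: cancellation waits for the operation to
                 * complete and must not be done under the spinlock.
                 */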
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                                int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                                                links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irq(&connection->lock);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irq(&connection->lock);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        ret = gb_connection_hd_cport_features_enable(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_flush_operations;

        return 0;

err_flush_operations:
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_features_disable(connection);
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_disable(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        gb_connection_control_disconnected(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
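
/*
 * Hypothetical teardown sketch (editorial illustration, not part of this
 * driver): the counterpart to the probe sketch above. A bundle driver's
 * disconnect path disables the connection before destroying it, as
 * required by the comment above gb_connection_destroy().
 */
#if 0
static void example_disconnect(struct gb_connection *connection)
{
        gb_connection_disable(connection);
        gb_connection_destroy(connection);
}
#endif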

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);