greybus: hd: generalise cport allocation
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take an additional reference on @connection (paired with gb_connection_put()). */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);
}
42
/*
 * Drop a reference on @connection; the last put frees it via
 * gb_connection_kref_release().
 */
static void gb_connection_put(struct gb_connection *connection)
{
	kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 *
 * @hd:       host device the data arrived on
 * @cport_id: host cport id the data was received on
 * @data:     received payload
 * @length:   payload length in bytes
 *
 * Data arriving on a cport with no matching connection is dropped
 * (with an error logged).  May be called from the host driver's
 * receive context.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
			u8 *data, size_t length)
{
	struct gb_connection *connection;

	/* Takes a reference on the connection; dropped below. */
	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
/*
 * Final kref release callback: frees the connection structure once the
 * last reference has been dropped (see gb_connection_put()).
 */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	kfree(connection);
}
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler,
				unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* Reject a second connection on an interface cport already in use. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Reserve a host cport; returns the id actually allocated. */
	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	/* Quirky interfaces get no CPort flow control regardless of flags. */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Ordered, single-threaded workqueue for this connection's work. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the hd (and bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
218
/*
 * Create a static connection (no remote interface) on a fixed host
 * cport id, e.g. for the SVC connection.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
					gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
					0);
}
226
/*
 * Create the control connection for @intf (remote cport 0, dynamic
 * host cport, no request handler or flags).
 */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, 0);
}
232
233 struct gb_connection *
234 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
235                                         gb_request_handler_t handler)
236 {
237         struct gb_interface *intf = bundle->intf;
238
239         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
240                                         handler, 0);
241 }
242 EXPORT_SYMBOL_GPL(gb_connection_create);
243
244 struct gb_connection *
245 gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
246                                         gb_request_handler_t handler,
247                                         unsigned long flags)
248 {
249         struct gb_interface *intf = bundle->intf;
250
251         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
252                                         handler, flags);
253 }
254 EXPORT_SYMBOL_GPL(gb_connection_create_flags);
255
256 struct gb_connection *
257 gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
258                                         unsigned long flags)
259 {
260         struct gb_interface *intf = bundle->intf;
261
262         flags |= GB_CONNECTION_FLAG_OFFLOADED;
263
264         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
265                                         NULL, flags);
266 }
267 EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
268
269 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
270 {
271         struct gb_host_device *hd = connection->hd;
272         int ret;
273
274         if (!hd->driver->cport_enable)
275                 return 0;
276
277         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
278         if (ret) {
279                 dev_err(&hd->dev,
280                         "failed to enable host cport: %d\n", ret);
281                 return ret;
282         }
283
284         return 0;
285 }
286
/*
 * Disable the connection's host cport via the host driver's optional
 * cport_disable callback (no-op if not provided).
 */
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	if (!hd->driver->cport_disable)
		return;

	hd->driver->cport_disable(hd, connection->hd_cport_id);
}
296
297 static int
298 gb_connection_hd_cport_features_enable(struct gb_connection *connection)
299 {
300         struct gb_host_device *hd = connection->hd;
301         int ret;
302
303         if (!hd->driver->cport_features_enable)
304                 return 0;
305
306         ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
307         if (ret) {
308                 dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
309                         connection->name, ret);
310                 return ret;
311         }
312
313         return 0;
314 }
315
/*
 * Disable host-cport features through the host driver's optional
 * cport_features_disable callback (no-op if not provided).
 */
static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	if (!hd->driver->cport_features_disable)
		return;

	hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}
326
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 *
 * Static connections (no remote interface) need no SVC route and only
 * enable host-cport features.  On failure after the SVC connection was
 * created, the SVC connection is torn down again before returning.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return gb_connection_hd_cport_features_enable(connection);

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		/* No flow control at all: disable CSD as well. */
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		/* End-to-end flow control replaces CSD. */
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
			hd->svc->ap_intf_id,
			connection->hd_cport_id,
			intf->interface_id,
			connection->intf_cport_id,
			cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	ret = gb_connection_hd_cport_features_enable(connection);
	if (ret) {
		/* Unwind the SVC connection created above. */
		gb_svc_connection_destroy(hd->svc, hd->svc->ap_intf_id,
					  connection->hd_cport_id,
					  intf->interface_id,
					  connection->intf_cport_id);
		return ret;
	}

	return 0;
}
379
/*
 * Tear down the SVC route for this connection, mirroring
 * gb_connection_svc_connection_create(): host-cport features are
 * disabled first; static connections have no SVC route to destroy.
 */
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	gb_connection_hd_cport_features_disable(connection);

	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
394
395 /* Inform Interface about active CPorts */
396 static int gb_connection_control_connected(struct gb_connection *connection)
397 {
398         struct gb_control *control;
399         u16 cport_id = connection->intf_cport_id;
400         int ret;
401
402         if (gb_connection_is_static(connection))
403                 return 0;
404
405         control = connection->intf->control;
406
407         if (connection == control->connection)
408                 return 0;
409
410         ret = gb_control_connected_operation(control, cport_id);
411         if (ret) {
412                 dev_err(&connection->bundle->dev,
413                         "failed to connect cport: %d\n", ret);
414                 return ret;
415         }
416
417         return 0;
418 }
419
/* Inform Interface about inactive CPorts */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	/* Static connections have no remote interface to notify. */
	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	/* The control connection itself is never announced. */
	if (connection == control->connection)
		return;

	/* Best effort: teardown continues even if the operation fails. */
	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
442
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 * The lock is dropped around each cancel (which may sleep); taking a
 * reference first keeps the operation alive while unlocked, and the
 * DISABLED state prevents new operations from being added meanwhile.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		/* Incoming and outgoing operations use different cancel paths. */
		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
470
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 * As above, the lock is dropped around each cancel; the list is
 * re-scanned from the top each iteration since it may have changed
 * while unlocked.  Outgoing operations are left untouched.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/* Pin it before dropping the lock. */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations remain: done. */
		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
507
/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 *
 * Return: 0 on success, or a negative errno; on failure the connection
 * is returned to the DISABLED state with pending operations cancelled.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* RX is only enabled when there is a handler to receive requests. */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	return 0;

err_svc_destroy:
	/* Unwind in reverse order of the setup above. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
567
568 int gb_connection_enable(struct gb_connection *connection)
569 {
570         int ret = 0;
571
572         mutex_lock(&connection->mutex);
573
574         if (connection->state == GB_CONNECTION_STATE_ENABLED)
575                 goto out_unlock;
576
577         ret = _gb_connection_enable(connection, true);
578 out_unlock:
579         mutex_unlock(&connection->mutex);
580
581         return ret;
582 }
583 EXPORT_SYMBOL_GPL(gb_connection_enable);
584
585 int gb_connection_enable_tx(struct gb_connection *connection)
586 {
587         int ret = 0;
588
589         mutex_lock(&connection->mutex);
590
591         if (connection->state == GB_CONNECTION_STATE_ENABLED) {
592                 ret = -EINVAL;
593                 goto out_unlock;
594         }
595
596         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
597                 goto out_unlock;
598
599         ret = _gb_connection_enable(connection, false);
600 out_unlock:
601         mutex_unlock(&connection->mutex);
602
603         return ret;
604 }
605 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
606
/*
 * Disable incoming requests on a fully enabled connection, moving it to
 * the TX-only state and cancelling any in-flight incoming operations.
 * A no-op unless the connection is in the ENABLED state.
 */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	/* New incoming requests are rejected once in ENABLED_TX. */
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Called with the lock held; it is dropped internally per cancel. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
624
/*
 * Fully disable a connection: notify the remote interface, cancel all
 * pending operations, tear down the SVC route and disable the host
 * cport.  A no-op on an already-disabled connection.
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	/* Tell the remote end first, while the connection still works. */
	gb_connection_control_disconnected(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
646
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	/* NULL-safe, like kfree(). */
	if (!connection)
		return;

	/* Serialise against concurrent create/destroy. */
	mutex_lock(&gb_connection_mutex);

	/* Unpublish from the hd and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	/* Poison the id so stale users are detectable. */
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial reference; frees once all users are done. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
670
671 void gb_connection_latency_tag_enable(struct gb_connection *connection)
672 {
673         struct gb_host_device *hd = connection->hd;
674         int ret;
675
676         if (!hd->driver->latency_tag_enable)
677                 return;
678
679         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
680         if (ret) {
681                 dev_err(&connection->hd->dev,
682                         "%s: failed to enable latency tag: %d\n",
683                         connection->name, ret);
684         }
685 }
686 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
687
688 void gb_connection_latency_tag_disable(struct gb_connection *connection)
689 {
690         struct gb_host_device *hd = connection->hd;
691         int ret;
692
693         if (!hd->driver->latency_tag_disable)
694                 return;
695
696         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
697         if (ret) {
698                 dev_err(&connection->hd->dev,
699                         "%s: failed to disable latency tag: %d\n",
700                         connection->name, ret);
701         }
702 }
703 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);