greybus: connection: drop the legacy protocol-id parameter
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take an extra reference on a connection; paired with gb_connection_put(). */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);
}
42
/*
 * Drop a connection reference; the last put frees the connection via
 * gb_connection_kref_release().
 */
static void gb_connection_put(struct gb_connection *connection)
{
	kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
70 /*
71  * Callback from the host driver to let us know that data has been
72  * received on the bundle.
73  */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75                         u8 *data, size_t length)
76 {
77         struct gb_connection *connection;
78
79         connection = gb_connection_hd_find(hd, cport_id);
80         if (!connection) {
81                 dev_err(&hd->dev,
82                         "nonexistent connection (%zu bytes dropped)\n", length);
83                 return;
84         }
85         gb_connection_recv(connection, data, length);
86         gb_connection_put(connection);
87 }
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
90 static void gb_connection_kref_release(struct kref *kref)
91 {
92         struct gb_connection *connection;
93
94         connection = container_of(kref, struct gb_connection, kref);
95
96         kfree(connection);
97 }
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/**
 * gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or NULL otherwise.
 */
static struct gb_connection *
gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	/*
	 * Negative hd_cport_id means "allocate any free host cport";
	 * otherwise restrict the ida range to exactly the requested id.
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return NULL;
	}

	mutex_lock(&gb_connection_mutex);

	/* Refuse a second connection to the same interface cport. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		goto err_unlock;
	}

	hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (hd_cport_id < 0)
		goto err_unlock;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		goto err_remove_ida;

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;

	connection->bundle = bundle;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Ordered (max_active = 1), unbound workqueue for this connection. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq)
		goto err_free_connection;

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the hd (and bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return NULL;
}
214
/*
 * Create a static connection: fixed host cport id, no remote interface
 * or bundle.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id)
{
	return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0);
}
220
/*
 * Create the control connection for @intf: remote cport 0, host cport
 * allocated dynamically, no bundle.
 */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return gb_connection_create(intf->hd, -1, intf, NULL, 0);
}
226
/*
 * Create a connection to @cport_id on @intf for @bundle, with the host
 * cport allocated dynamically.
 */
struct gb_connection *
gb_connection_create_dynamic(struct gb_interface *intf,
					struct gb_bundle *bundle,
					u16 cport_id)
{
	return gb_connection_create(intf->hd, -1, intf, bundle, cport_id);
}
EXPORT_SYMBOL_GPL(gb_connection_create_dynamic);
235
236 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
237 {
238         struct gb_host_device *hd = connection->hd;
239         int ret;
240
241         if (!hd->driver->cport_enable)
242                 return 0;
243
244         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
245         if (ret) {
246                 dev_err(&hd->dev,
247                         "failed to enable host cport: %d\n", ret);
248                 return ret;
249         }
250
251         return 0;
252 }
253
254 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
255 {
256         struct gb_host_device *hd = connection->hd;
257
258         if (!hd->driver->cport_disable)
259                 return;
260
261         hd->driver->cport_disable(hd, connection->hd_cport_id);
262 }
263
264 /*
265  * Request the SVC to create a connection from AP's cport to interface's
266  * cport.
267  */
268 static int
269 gb_connection_svc_connection_create(struct gb_connection *connection)
270 {
271         struct gb_host_device *hd = connection->hd;
272         struct gb_interface *intf;
273         int ret;
274
275         if (gb_connection_is_static(connection))
276                 return 0;
277
278         intf = connection->intf;
279         ret = gb_svc_connection_create(hd->svc,
280                         hd->svc->ap_intf_id,
281                         connection->hd_cport_id,
282                         intf->interface_id,
283                         connection->intf_cport_id,
284                         intf->boot_over_unipro);
285         if (ret) {
286                 dev_err(&connection->hd->dev,
287                         "%s: failed to create svc connection: %d\n",
288                         connection->name, ret);
289                 return ret;
290         }
291
292         return 0;
293 }
294
295 static void
296 gb_connection_svc_connection_destroy(struct gb_connection *connection)
297 {
298         if (gb_connection_is_static(connection))
299                 return;
300
301         gb_svc_connection_destroy(connection->hd->svc,
302                                   connection->hd->svc->ap_intf_id,
303                                   connection->hd_cport_id,
304                                   connection->intf->interface_id,
305                                   connection->intf_cport_id);
306 }
307
308 /* Inform Interface about active CPorts */
309 static int gb_connection_control_connected(struct gb_connection *connection)
310 {
311         struct gb_control *control;
312         u16 cport_id = connection->intf_cport_id;
313         int ret;
314
315         if (gb_connection_is_static(connection))
316                 return 0;
317
318         control = connection->intf->control;
319
320         if (connection == control->connection)
321                 return 0;
322
323         ret = gb_control_connected_operation(control, cport_id);
324         if (ret) {
325                 dev_err(&connection->bundle->dev,
326                         "failed to connect cport: %d\n", ret);
327                 return ret;
328         }
329
330         return 0;
331 }
332
333 /* Inform Interface about inactive CPorts */
334 static void
335 gb_connection_control_disconnected(struct gb_connection *connection)
336 {
337         struct gb_control *control;
338         u16 cport_id = connection->intf_cport_id;
339         int ret;
340
341         if (gb_connection_is_static(connection))
342                 return;
343
344         control = connection->intf->control;
345
346         if (connection == control->connection)
347                 return;
348
349         ret = gb_control_disconnected_operation(control, cport_id);
350         if (ret) {
351                 dev_warn(&connection->bundle->dev,
352                          "failed to disconnect cport: %d\n", ret);
353         }
354 }
355
356 /*
357  * Cancel all active operations on a connection.
358  *
359  * Locking: Called with connection lock held and state set to DISABLED.
360  */
361 static void gb_connection_cancel_operations(struct gb_connection *connection,
362                                                 int errno)
363 {
364         struct gb_operation *operation;
365
366         while (!list_empty(&connection->operations)) {
367                 operation = list_last_entry(&connection->operations,
368                                                 struct gb_operation, links);
369                 gb_operation_get(operation);
370                 spin_unlock_irq(&connection->lock);
371
372                 if (gb_operation_is_incoming(operation))
373                         gb_operation_cancel_incoming(operation, errno);
374                 else
375                         gb_operation_cancel(operation, errno);
376
377                 gb_operation_put(operation);
378
379                 spin_lock_irq(&connection->lock);
380         }
381 }
382
383 /*
384  * Cancel all active incoming operations on a connection.
385  *
386  * Locking: Called with connection lock held and state set to ENABLED_TX.
387  */
388 static void
389 gb_connection_flush_incoming_operations(struct gb_connection *connection,
390                                                 int errno)
391 {
392         struct gb_operation *operation;
393         bool incoming;
394
395         while (!list_empty(&connection->operations)) {
396                 incoming = false;
397                 list_for_each_entry(operation, &connection->operations,
398                                                                 links) {
399                         if (gb_operation_is_incoming(operation)) {
400                                 gb_operation_get(operation);
401                                 incoming = true;
402                                 break;
403                         }
404                 }
405
406                 if (!incoming)
407                         break;
408
409                 spin_unlock_irq(&connection->lock);
410
411                 /* FIXME: flush, not cancel? */
412                 gb_operation_cancel_incoming(operation, errno);
413                 gb_operation_put(operation);
414
415                 spin_lock_irq(&connection->lock);
416         }
417 }
418
/*
 * Enable a connection: enable the host cport, set up the SVC route
 * (unless static) and announce the cport to the remote interface.
 *
 * With a @handler the connection becomes fully enabled (TX and RX);
 * without one only outgoing traffic is allowed (ENABLED_TX). Calling
 * this on an ENABLED_TX connection with a handler upgrades it to fully
 * enabled without repeating the setup steps.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gb_connection_enable(struct gb_connection *connection,
				gb_request_handler_t handler)
{
	int ret;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!handler)
			goto out_unlock;

		/* Upgrade TX-only to fully enabled; no new setup needed. */
		spin_lock_irq(&connection->lock);
		connection->handler = handler;
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		goto out_unlock;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		goto err_unlock;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* Publish handler and state before announcing the cport. */
	spin_lock_irq(&connection->lock);
	connection->handler = handler;
	if (handler)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

out_unlock:
	mutex_unlock(&connection->mutex);

	return 0;

err_svc_destroy:
	/* Unwind in reverse order, cancelling anything already in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	connection->handler = NULL;
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);
err_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
482
483 void gb_connection_disable_rx(struct gb_connection *connection)
484 {
485         mutex_lock(&connection->mutex);
486
487         spin_lock_irq(&connection->lock);
488         if (connection->state != GB_CONNECTION_STATE_ENABLED) {
489                 spin_unlock_irq(&connection->lock);
490                 goto out_unlock;
491         }
492         connection->state = GB_CONNECTION_STATE_ENABLED_TX;
493         gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
494         connection->handler = NULL;
495         spin_unlock_irq(&connection->lock);
496
497 out_unlock:
498         mutex_unlock(&connection->mutex);
499 }
500
/*
 * Disable a connection: inform the remote interface that the cport is
 * going away, cancel all outstanding operations, then tear down the
 * SVC route and the host cport. No-op if already disabled.
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	/* Announce the teardown while the connection is still usable. */
	gb_connection_control_disconnected(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	connection->handler = NULL;
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
523
524 /* Caller must have disabled the connection before destroying it. */
525 void gb_connection_destroy(struct gb_connection *connection)
526 {
527         struct ida *id_map;
528
529         if (!connection)
530                 return;
531
532         mutex_lock(&gb_connection_mutex);
533
534         spin_lock_irq(&gb_connections_lock);
535         list_del(&connection->bundle_links);
536         list_del(&connection->hd_links);
537         spin_unlock_irq(&gb_connections_lock);
538
539         destroy_workqueue(connection->wq);
540
541         id_map = &connection->hd->cport_id_map;
542         ida_simple_remove(id_map, connection->hd_cport_id);
543         connection->hd_cport_id = CPORT_ID_BAD;
544
545         mutex_unlock(&gb_connection_mutex);
546
547         gb_connection_put(connection);
548 }
549 EXPORT_SYMBOL_GPL(gb_connection_destroy);
550
551 void gb_connection_latency_tag_enable(struct gb_connection *connection)
552 {
553         struct gb_host_device *hd = connection->hd;
554         int ret;
555
556         if (!hd->driver->latency_tag_enable)
557                 return;
558
559         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
560         if (ret) {
561                 dev_err(&connection->hd->dev,
562                         "%s: failed to enable latency tag: %d\n",
563                         connection->name, ret);
564         }
565 }
566 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
567
568 void gb_connection_latency_tag_disable(struct gb_connection *connection)
569 {
570         struct gb_host_device *hd = connection->hd;
571         int ret;
572
573         if (!hd->driver->latency_tag_disable)
574                 return;
575
576         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
577         if (ret) {
578                 dev_err(&connection->hd->dev,
579                         "%s: failed to disable latency tag: %d\n",
580                         connection->name, ret);
581         }
582 }
583 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);