greybus: core: defer connection creation to driver probe
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14
15 static void gb_connection_kref_release(struct kref *kref);
16
17
18 static DEFINE_SPINLOCK(gb_connections_lock);
19 static DEFINE_MUTEX(gb_connection_mutex);
20
21
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return connection;
33         }
34
35         return NULL;
36 }
37
/* Take an additional reference on @connection. */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);
}
42
/*
 * Drop a reference on @connection; the last put frees it via
 * gb_connection_kref_release().
 */
static void gb_connection_put(struct gb_connection *connection)
{
	kref_put(&connection->kref, gb_connection_kref_release);
}
47
48 /*
49  * Returns a reference-counted pointer to the connection if found.
50  */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
53 {
54         struct gb_connection *connection;
55         unsigned long flags;
56
57         spin_lock_irqsave(&gb_connections_lock, flags);
58         list_for_each_entry(connection, &hd->connections, hd_links)
59                 if (connection->hd_cport_id == cport_id) {
60                         gb_connection_get(connection);
61                         goto found;
62                 }
63         connection = NULL;
64 found:
65         spin_unlock_irqrestore(&gb_connections_lock, flags);
66
67         return connection;
68 }
69
70 /*
71  * Callback from the host driver to let us know that data has been
72  * received on the bundle.
73  */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75                         u8 *data, size_t length)
76 {
77         struct gb_connection *connection;
78
79         connection = gb_connection_hd_find(hd, cport_id);
80         if (!connection) {
81                 dev_err(&hd->dev,
82                         "nonexistent connection (%zu bytes dropped)\n", length);
83                 return;
84         }
85         gb_connection_recv(connection, data, length);
86         gb_connection_put(connection);
87 }
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
89
90 static void gb_connection_kref_release(struct kref *kref)
91 {
92         struct gb_connection *connection;
93
94         connection = container_of(kref, struct gb_connection, kref);
95
96         kfree(connection);
97 }
98
99 static void gb_connection_init_name(struct gb_connection *connection)
100 {
101         u16 hd_cport_id = connection->hd_cport_id;
102         u16 cport_id = 0;
103         u8 intf_id = 0;
104
105         if (connection->intf) {
106                 intf_id = connection->intf->interface_id;
107                 cport_id = connection->intf_cport_id;
108         }
109
110         snprintf(connection->name, sizeof(connection->name),
111                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
112 }
113
/*
 * gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @protocol_id:        protocol id
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or NULL otherwise.
 */
static struct gb_connection *
gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				u8 protocol_id)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	/*
	 * A negative hd_cport_id means "allocate any free host cport";
	 * otherwise reserve exactly the requested id (range [id, id + 1)).
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return NULL;
	}

	mutex_lock(&gb_connection_mutex);

	/* Refuse a second connection to the same remote-interface cport. */
	if (intf && gb_connection_intf_find(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		goto err_unlock;
	}

	/* Reserve the host cport id in the host device's id map. */
	hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (hd_cport_id < 0)
		goto err_unlock;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		goto err_remove_ida;

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;

	connection->protocol_id = protocol_id;

	connection->bundle = bundle;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* One ordered, unbound workqueue per connection. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq)
		goto err_free_connection;

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the host-device (and bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return NULL;
}
218
/*
 * Create a static connection (one with no remote interface) using the
 * given fixed host cport id.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd,
					u16 hd_cport_id, u8 protocol_id)
{
	return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
								protocol_id);
}
226
/*
 * Create a connection to cport @cport_id on @intf, dynamically
 * allocating a free host cport for the local end. @bundle may be NULL.
 */
struct gb_connection *
gb_connection_create_dynamic(struct gb_interface *intf,
					struct gb_bundle *bundle,
					u16 cport_id, u8 protocol_id)
{
	return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
								protocol_id);
}
EXPORT_SYMBOL_GPL(gb_connection_create_dynamic);
236
237 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
238 {
239         struct gb_host_device *hd = connection->hd;
240         int ret;
241
242         if (!hd->driver->cport_enable)
243                 return 0;
244
245         ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
246         if (ret) {
247                 dev_err(&hd->dev,
248                         "failed to enable host cport: %d\n", ret);
249                 return ret;
250         }
251
252         return 0;
253 }
254
255 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
256 {
257         struct gb_host_device *hd = connection->hd;
258
259         if (!hd->driver->cport_disable)
260                 return;
261
262         hd->driver->cport_disable(hd, connection->hd_cport_id);
263 }
264
265 /*
266  * Request the SVC to create a connection from AP's cport to interface's
267  * cport.
268  */
269 static int
270 gb_connection_svc_connection_create(struct gb_connection *connection)
271 {
272         struct gb_host_device *hd = connection->hd;
273         struct gb_interface *intf;
274         int ret;
275
276         if (gb_connection_is_static(connection))
277                 return 0;
278
279         intf = connection->intf;
280         ret = gb_svc_connection_create(hd->svc,
281                         hd->svc->ap_intf_id,
282                         connection->hd_cport_id,
283                         intf->interface_id,
284                         connection->intf_cport_id,
285                         intf->boot_over_unipro);
286         if (ret) {
287                 dev_err(&connection->hd->dev,
288                         "%s: failed to create svc connection: %d\n",
289                         connection->name, ret);
290                 return ret;
291         }
292
293         return 0;
294 }
295
296 static void
297 gb_connection_svc_connection_destroy(struct gb_connection *connection)
298 {
299         if (gb_connection_is_static(connection))
300                 return;
301
302         gb_svc_connection_destroy(connection->hd->svc,
303                                   connection->hd->svc->ap_intf_id,
304                                   connection->hd_cport_id,
305                                   connection->intf->interface_id,
306                                   connection->intf_cport_id);
307 }
308
309 /* Inform Interface about active CPorts */
310 static int gb_connection_control_connected(struct gb_connection *connection)
311 {
312         struct gb_control *control;
313         u16 cport_id = connection->intf_cport_id;
314         int ret;
315
316         if (gb_connection_is_static(connection))
317                 return 0;
318
319         control = connection->intf->control;
320
321         if (connection == control->connection)
322                 return 0;
323
324         ret = gb_control_connected_operation(control, cport_id);
325         if (ret) {
326                 dev_err(&connection->bundle->dev,
327                         "failed to connect cport: %d\n", ret);
328                 return ret;
329         }
330
331         return 0;
332 }
333
334 /* Inform Interface about inactive CPorts */
335 static void
336 gb_connection_control_disconnected(struct gb_connection *connection)
337 {
338         struct gb_control *control;
339         u16 cport_id = connection->intf_cport_id;
340         int ret;
341
342         if (gb_connection_is_static(connection))
343                 return;
344
345         control = connection->intf->control;
346
347         if (connection == control->connection)
348                 return;
349
350         ret = gb_control_disconnected_operation(control, cport_id);
351         if (ret) {
352                 dev_warn(&connection->bundle->dev,
353                          "failed to disconnect cport: %d\n", ret);
354         }
355 }
356
/*
 * Cancel all active operations on a connection.
 *
 * Operations are taken from the tail of the list; a reference is held
 * on each one and the connection lock is dropped around the cancel
 * call, so the list may change while the lock is released — hence the
 * re-test of list_empty() each iteration.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/* Keep the operation alive while the lock is dropped. */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
383
/*
 * Cancel all active incoming operations on a connection.
 *
 * Each pass scans the list for one incoming operation, takes a
 * reference on it, and cancels it with the connection lock dropped.
 * The loop terminates when a full scan finds no incoming operations;
 * outgoing operations are left untouched.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/* Pin it before releasing the lock. */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
419
/*
 * Enable a connection for use.
 *
 * With a non-NULL @handler the connection is enabled for both transmit
 * and receive (ENABLED); with a NULL handler only transmit is enabled
 * (ENABLED_TX). Calling this on a connection already in ENABLED_TX
 * with a handler upgrades it to ENABLED without redoing the host-cport
 * and SVC setup; calling it on an ENABLED connection is a no-op.
 *
 * Returns 0 on success or a negative errno, in which case the setup
 * performed so far has been unwound and the connection is DISABLED.
 */
int gb_connection_enable(struct gb_connection *connection,
				gb_request_handler_t handler)
{
	int ret;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!handler)
			goto out_unlock;

		/* Upgrade ENABLED_TX to ENABLED by installing the handler. */
		spin_lock_irq(&connection->lock);
		connection->handler = handler;
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		goto out_unlock;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		goto err_unlock;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	spin_lock_irq(&connection->lock);
	connection->handler = handler;
	if (handler)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

out_unlock:
	mutex_unlock(&connection->mutex);

	return 0;

err_svc_destroy:
	/* Unwind in reverse order, cancelling anything already in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	connection->handler = NULL;
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);
err_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
483
/*
 * Disable receive on a fully-enabled connection, flushing pending
 * incoming operations and clearing the handler, while leaving transmit
 * enabled (ENABLED_TX). Does nothing unless the connection is ENABLED.
 */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Note: drops and reacquires connection->lock internally. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	connection->handler = NULL;
	spin_unlock_irq(&connection->lock);

out_unlock:
	mutex_unlock(&connection->mutex);
}
501
/*
 * Fully disable a connection: tell the remote interface the cport is
 * going down, cancel all outstanding operations, and tear down the SVC
 * route and host cport. A no-op if the connection is already DISABLED.
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	/* Inform the remote end before cutting off traffic. */
	gb_connection_control_disconnected(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	/* Note: drops and reacquires connection->lock internally. */
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	connection->handler = NULL;
	spin_unlock_irq(&connection->lock);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
524
/*
 * Unlink and release a connection, dropping the initial reference.
 * A NULL @connection is tolerated to simplify caller error paths.
 *
 * Caller must have disabled the connection before destroying it.
 */
void gb_connection_destroy(struct gb_connection *connection)
{
	struct ida *id_map;

	if (!connection)
		return;

	mutex_lock(&gb_connection_mutex);

	/* Unlink from the host-device and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	/* Release the host cport id and poison the stored value. */
	id_map = &connection->hd->cport_id_map;
	ida_simple_remove(id_map, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial reference; may free the connection. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
551
552 void gb_connection_latency_tag_enable(struct gb_connection *connection)
553 {
554         struct gb_host_device *hd = connection->hd;
555         int ret;
556
557         if (!hd->driver->latency_tag_enable)
558                 return;
559
560         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
561         if (ret) {
562                 dev_err(&connection->hd->dev,
563                         "%s: failed to enable latency tag: %d\n",
564                         connection->name, ret);
565         }
566 }
567 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
568
569 void gb_connection_latency_tag_disable(struct gb_connection *connection)
570 {
571         struct gb_host_device *hd = connection->hd;
572         int ret;
573
574         if (!hd->driver->latency_tag_disable)
575                 return;
576
577         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
578         if (ret) {
579                 dev_err(&connection->hd->dev,
580                         "%s: failed to disable latency tag: %d\n",
581                         connection->name, ret);
582         }
583 }
584 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);