/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

static DEFINE_SPINLOCK(gb_connections_lock);

/* This is only used at initialization time; no locking is required. */
static struct gb_connection *
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return connection;
        }

        return NULL;
}

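/*
 * Look up a connection by its host-device cport id. This is called from
 * the data-receive path and can race with connection removal, so the
 * lookup is done under gb_connections_lock.
 */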
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id)
                        goto found;
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                        u8 *data, size_t length)
{
        struct gb_connection *connection;

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static DEFINE_MUTEX(connection_mutex);

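/*
 * Release a connection when its last reference is dropped. The release
 * runs with connection_mutex held (via kref_put_mutex() in
 * gb_connection_destroy()) and is responsible for dropping the lock.
 */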
static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);
        destroy_workqueue(connection->wq);
        kfree(connection);
        mutex_unlock(&connection_mutex);
}

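/*
 * Associate a connection with a newly created SVC bundle on @intf and add
 * it to the bundle's list of connections.
 */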
int svc_update_connection(struct gb_interface *intf,
                          struct gb_connection *connection)
{
        struct gb_bundle *bundle;

        bundle = gb_bundle_create(intf, GB_SVC_BUNDLE_ID, GREYBUS_CLASS_SVC);
        if (!bundle)
                return -EINVAL;

        connection->bundle = bundle;

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->bundle_links, &bundle->connections);
        spin_unlock_irq(&gb_connections_lock);

        return 0;
}

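/*
 * Build the connection name "<hd_cport_id>/<intf_id>:<cport_id>". Static
 * connections have no interface, so they use 0 for both the interface id
 * and the interface cport id.
 */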
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                        "%hu/%hhu:%hu", hd_cport_id, intf_id, cport_id);
}

/*
 * gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @protocol_id:        protocol id
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Return: A pointer to the new connection if successful, or NULL otherwise.
 */
static struct gb_connection *
gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                                struct gb_interface *intf,
                                struct gb_bundle *bundle, int cport_id,
                                u8 protocol_id)
{
        struct gb_connection *connection;
        struct ida *id_map = &hd->cport_id_map;
        int ida_start, ida_end;
        int retval;
        u8 major = 0;
        u8 minor = 1;

        /*
         * If a manifest tries to reuse a cport, reject it.  We
         * initialize connections serially so we don't need to worry
         * about holding the connection lock.
         */
        if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
                dev_err(&bundle->dev, "cport 0x%04hx already connected\n",
                                cport_id);
                return NULL;
        }

        if (hd_cport_id < 0) {
                ida_start = 0;
                ida_end = hd->num_cports;
        } else if (hd_cport_id < hd->num_cports) {
                ida_start = hd_cport_id;
                ida_end = hd_cport_id + 1;
        } else {
                dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
                return NULL;
        }

        hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
        if (hd_cport_id < 0)
                return NULL;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection)
                goto err_remove_ida;

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;

        connection->protocol_id = protocol_id;
        connection->major = major;
        connection->minor = minor;

        connection->bundle = bundle;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq)
                goto err_free_connection;

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        retval = gb_connection_bind_protocol(connection);
        if (retval) {
                dev_err(&hd->dev, "%d: failed to bind protocol: %d\n",
                        cport_id, retval);
                gb_connection_destroy(connection);
                return NULL;
        }

        return connection;

err_free_connection:
        kfree(connection);
err_remove_ida:
        ida_simple_remove(id_map, hd_cport_id);

        return NULL;
}

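/*
 * Create a static connection, that is, one bound to a fixed host cport and
 * not associated with any remote interface or bundle (for example the
 * initial SVC connection).
 */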
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd,
                                        u16 hd_cport_id, u8 protocol_id)
{
        return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
                                                                protocol_id);
}

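/*
 * Create a dynamic connection to a cport of a remote interface; the host
 * cport id is allocated from the host device's cport id map.
 */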
struct gb_connection *
gb_connection_create_dynamic(struct gb_interface *intf,
                                        struct gb_bundle *bundle,
                                        u16 cport_id, u8 protocol_id)
{
        return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                                                protocol_id);
}

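/*
 * Enable the host-device cport backing this connection. Host drivers that
 * need no per-cport setup may leave the callback unimplemented.
 */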
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev,
                        "failed to enable host cport: %d\n", ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_disable)
                return;

        hd->driver->cport_disable(hd, connection->hd_cport_id);
}

/*
 * Cancel all active operations on a connection.
 *
 * Should only be called during connection tear down.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                                int errno)
{
        struct gb_operation *operation;

        spin_lock_irq(&connection->lock);
        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                                struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
        spin_unlock_irq(&connection->lock);
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->bundle->intf;
        ret = gb_svc_connection_create(hd->svc,
                        hd->svc->ap_intf_id,
                        connection->hd_cport_id,
                        intf->interface_id,
                        connection->intf_cport_id,
                        intf->boot_over_unipro);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to create svc connection: %d\n", ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->bundle->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
                return 0;

        control = connection->bundle->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

/* Inform Interface about inactive CPorts */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
                return;

        control = connection->bundle->intf->control;

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

/*
 * Request protocol version supported by the module. We don't need to do
 * this for SVC as that is initiated by the SVC.
 */
static int gb_connection_protocol_get_version(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
                return 0;

        ret = gb_protocol_get_version(connection);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to get protocol version: %d\n", ret);
                return ret;
        }

        return 0;
}

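/*
 * Bring up a connection: enable the host cport, request the SVC connection,
 * tell the interface the cport is connected, switch the connection state to
 * ENABLED, query the protocol version and finally run the protocol's
 * connection_init() handler. Any failure unwinds the steps already taken.
 */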
static int gb_connection_init(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        int ret;

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_svc_destroy;

        /* Need to enable the connection to initialize it */
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_ENABLED;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_protocol_get_version(connection);
        if (ret)
                goto err_disconnect;

        ret = protocol->connection_init(connection);
        if (ret)
                goto err_disconnect;

        return 0;

err_disconnect:
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_ERROR;
        spin_unlock_irq(&connection->lock);

        gb_connection_control_disconnected(connection);
err_svc_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}

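/*
 * Shut down an enabled connection: mark it DESTROYING so that no new
 * operations are accepted, cancel any outstanding operations with
 * -ESHUTDOWN, run the protocol's connection_exit() handler and then tear
 * down the control, SVC and host-cport state in reverse order of setup.
 */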
static void gb_connection_exit(struct gb_connection *connection)
{
        if (!connection->protocol)
                return;

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                return;
        }
        connection->state = GB_CONNECTION_STATE_DESTROYING;
        spin_unlock_irq(&connection->lock);

        gb_connection_cancel_operations(connection, -ESHUTDOWN);

        connection->protocol->connection_exit(connection);
        gb_connection_control_disconnected(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);
}

/*
 * Tear down a previously set up connection.
 */
void gb_connection_destroy(struct gb_connection *connection)
{
        struct ida *id_map;

        if (WARN_ON(!connection))
                return;

        gb_connection_exit(connection);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        if (connection->protocol)
                gb_protocol_put(connection->protocol);
        connection->protocol = NULL;

        id_map = &connection->hd->cport_id_map;
        ida_simple_remove(id_map, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        kref_put_mutex(&connection->kref, gb_connection_kref_release,
                       &connection_mutex);
}

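/*
 * Latency tagging lets the host driver mark traffic on a cport, typically
 * so that transfer latency can be measured. Both helpers are no-ops when
 * the host driver does not implement the corresponding callback.
 */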
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to enable latency tag: %d\n", ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to disable latency tag: %d\n", ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);

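/*
 * Bind the protocol driver matching the connection's protocol id and
 * version. If no such driver has been registered yet, return success and
 * leave the connection unbound so that it can be bound later, once the
 * protocol is available. If the connection is already usable (the
 * interface has a device id, or the protocol needs no bundle), initialize
 * it right away.
 */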
int gb_connection_bind_protocol(struct gb_connection *connection)
{
        struct gb_protocol *protocol;
        int ret;

        /* If we already have a protocol bound here, just return */
        if (connection->protocol)
                return 0;

        protocol = gb_protocol_get(connection->protocol_id,
                                   connection->major,
                                   connection->minor);
        if (!protocol) {
                dev_warn(&connection->hd->dev,
                                "protocol 0x%02hhx version %hhu.%hhu not found\n",
                                connection->protocol_id,
                                connection->major, connection->minor);
                return 0;
        }
        connection->protocol = protocol;

        /*
         * If we have a valid device_id for the interface block, then we have an
         * active device, so bring up the connection at the same time.
         */
        if ((!connection->bundle &&
             protocol->flags & GB_PROTOCOL_NO_BUNDLE) ||
            connection->bundle->intf->device_id != GB_DEVICE_ID_BAD) {
                ret = gb_connection_init(connection);
                if (ret) {
                        gb_protocol_put(protocol);
                        connection->protocol = NULL;
                        return ret;
                }
        }

        return 0;
}