greybus: connection: clean up operation cancellation on disable
drivers/staging/greybus/connection.c
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"


static int gb_connection_bind_protocol(struct gb_connection *connection);
static void gb_connection_unbind_protocol(struct gb_connection *connection);


static DEFINE_SPINLOCK(gb_connections_lock);

/* This is only used at initialization time; no locking is required. */
static struct gb_connection *
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return connection;
        }

        return NULL;
}

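/*
 * Look up the connection bound to a host-device cport.  Unlike the
 * initialization-time helper above, this can run concurrently with
 * connection creation and removal, so it takes the connections lock.
 */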
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id)
                        goto found;
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on one of its cports.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                        u8 *data, size_t length)
{
        struct gb_connection *connection;

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static DEFINE_MUTEX(connection_mutex);

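/*
 * Final kref release callback.  Invoked via kref_put_mutex() in
 * gb_connection_destroy(), so it runs with connection_mutex held and
 * drops that mutex once the connection has been freed.
 */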
static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);
        destroy_workqueue(connection->wq);
        kfree(connection);
        mutex_unlock(&connection_mutex);
}

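/*
 * Build the connection name used in log messages, in the form
 * "<hd_cport_id>/<interface_id>:<intf_cport_id>".  Static connections
 * have no remote interface, so both remote fields read as 0.
 */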
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                        "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @protocol_id:        protocol id
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Return: A pointer to the new connection if successful, or NULL otherwise.
 */
static struct gb_connection *
gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                                struct gb_interface *intf,
                                struct gb_bundle *bundle, int cport_id,
                                u8 protocol_id)
{
        struct gb_connection *connection;
        struct ida *id_map = &hd->cport_id_map;
        int ida_start, ida_end;
        u8 major = 0;
        u8 minor = 1;

        /*
         * If a manifest tries to reuse a cport, reject it.  We
         * initialize connections serially so we don't need to worry
         * about holding the connection lock.
         */
        if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
                dev_err(&bundle->dev, "cport %u already connected\n",
                                cport_id);
                return NULL;
        }

        if (hd_cport_id < 0) {
                ida_start = 0;
                ida_end = hd->num_cports;
        } else if (hd_cport_id < hd->num_cports) {
                ida_start = hd_cport_id;
                ida_end = hd_cport_id + 1;
        } else {
                dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
                return NULL;
        }

        hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
        if (hd_cport_id < 0)
                return NULL;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection)
                goto err_remove_ida;

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;

        connection->protocol_id = protocol_id;
        connection->major = major;
        connection->minor = minor;

        connection->bundle = bundle;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq)
                goto err_free_connection;

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        return connection;

err_free_connection:
        kfree(connection);
err_remove_ida:
        ida_simple_remove(id_map, hd_cport_id);

        return NULL;
}

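/*
 * Create a connection for a static cport, one that has no remote
 * interface.  The host-device cport id is chosen by the caller.
 */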
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd,
                                        u16 hd_cport_id, u8 protocol_id)
{
        return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
                                                                protocol_id);
}

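/*
 * Create a connection to a cport of a remote interface.  The host-device
 * cport id is allocated dynamically (hd_cport_id of -1).
 */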
struct gb_connection *
gb_connection_create_dynamic(struct gb_interface *intf,
                                        struct gb_bundle *bundle,
                                        u16 cport_id, u8 protocol_id)
{
        return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                                                protocol_id);
}

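/*
 * Enable (and, below, disable) the host-device end of the connection.
 * The cport_enable and cport_disable host-driver callbacks are optional,
 * so a missing callback is not treated as an error.
 */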
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev,
                        "failed to enable host cport: %d\n", ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_disable)
                return;

        hd->driver->cport_disable(hd, connection->hd_cport_id);
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;
        ret = gb_svc_connection_create(hd->svc,
                        hd->svc->ap_intf_id,
                        connection->hd_cport_id,
                        intf->interface_id,
                        connection->intf_cport_id,
                        intf->boot_over_unipro);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

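/*
 * Ask the SVC to tear down the connection set up by
 * gb_connection_svc_connection_create().  Static connections have no
 * SVC-managed connection, so there is nothing to destroy.
 */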
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform the interface that its cport is now connected */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
                return 0;

        control = connection->bundle->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

/* Inform the interface that its cport is no longer connected */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
                return;

        control = connection->bundle->intf->control;

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

/*
 * Request the protocol version supported by the module.  We don't need
 * to do this for the SVC, as that exchange is initiated by the SVC
 * itself.
 */
static int gb_connection_protocol_get_version(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;
        int ret;

        if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
                return 0;

        ret = gb_protocol_get_version(connection);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to get protocol version: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                                int errno)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                                struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

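/*
 * gb_connection_enable() - enable a connection
 * @connection:	connection to enable
 * @handler:	handler for incoming requests, or NULL if none are expected
 *
 * Enable the host-device cport, ask the SVC to set up the connection,
 * mark the connection enabled and install the request handler, and
 * finally send the control connected operation to the interface.  On
 * failure, everything set up so far is torn down again.
 *
 * A minimal usage sketch (the handler name is illustrative only):
 *
 *	ret = gb_connection_enable(connection, example_request_handler);
 *	if (ret)
 *		return ret;
 *	...
 *	gb_connection_disable(connection);
 */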
int gb_connection_enable(struct gb_connection *connection,
                                gb_request_handler_t handler)
{
        int ret;

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        spin_lock_irq(&connection->lock);
        connection->handler = handler;
        connection->state = GB_CONNECTION_STATE_ENABLED;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_svc_destroy;

        return 0;

err_svc_destroy:
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        spin_unlock_irq(&connection->lock);

        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

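/*
 * gb_connection_disable() - disable a connection
 * @connection:	connection to disable
 *
 * Send the control disconnected operation, mark the connection disabled
 * and cancel all outstanding operations with -ESHUTDOWN while holding
 * the connection lock, then tear down the SVC connection and disable
 * the host-device cport.  Disabling an already-disabled connection is
 * a no-op.
 */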
void gb_connection_disable(struct gb_connection *connection)
{
        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                return;

        gb_connection_control_disconnected(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

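/*
 * Generic request handler for legacy protocol drivers: forward the
 * incoming operation to the protocol's request_recv() callback.
 */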
static int gb_legacy_request_handler(struct gb_operation *operation)
{
        struct gb_protocol *protocol = operation->connection->protocol;

        return protocol->request_recv(operation->type, operation);
}

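/*
 * Initialise a connection for a legacy protocol driver: bind the
 * protocol, enable the connection (installing the legacy request
 * handler if the protocol receives requests), negotiate the protocol
 * version, and finally call the protocol's connection_init() callback.
 */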
int gb_connection_legacy_init(struct gb_connection *connection)
{
        gb_request_handler_t handler;
        int ret;

        ret = gb_connection_bind_protocol(connection);
        if (ret)
                return ret;

        if (connection->protocol->request_recv)
                handler = gb_legacy_request_handler;
        else
                handler = NULL;

        ret = gb_connection_enable(connection, handler);
        if (ret)
                goto err_unbind_protocol;

        ret = gb_connection_protocol_get_version(connection);
        if (ret)
                goto err_disable;

        ret = connection->protocol->connection_init(connection);
        if (ret)
                goto err_disable;

        return 0;

err_disable:
        gb_connection_disable(connection);
err_unbind_protocol:
        gb_connection_unbind_protocol(connection);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_legacy_init);

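/*
 * Tear down a legacy-protocol connection: disable it, call the
 * protocol's connection_exit() callback and drop the protocol binding.
 */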
void gb_connection_legacy_exit(struct gb_connection *connection)
{
        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                return;

        gb_connection_disable(connection);

        connection->protocol->connection_exit(connection);

        gb_connection_unbind_protocol(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_legacy_exit);

/*
 * Tear down a previously set up connection.
 */
void gb_connection_destroy(struct gb_connection *connection)
{
        struct ida *id_map;

        if (WARN_ON(!connection))
                return;

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        id_map = &connection->hd->cport_id_map;
        ida_simple_remove(id_map, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        kref_put_mutex(&connection->kref, gb_connection_kref_release,
                       &connection_mutex);
}

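/*
 * Latency-tag support is an optional host-driver feature; the enable
 * and disable helpers below simply log a failure and carry on if the
 * driver reports an error.
 */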
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);

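/*
 * Look up and take a reference on the protocol matching the connection's
 * protocol id and version.  gb_connection_unbind_protocol() drops that
 * reference again.
 */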
static int gb_connection_bind_protocol(struct gb_connection *connection)
{
        struct gb_protocol *protocol;

        protocol = gb_protocol_get(connection->protocol_id,
                                   connection->major,
                                   connection->minor);
        if (!protocol) {
                dev_err(&connection->hd->dev,
                                "protocol 0x%02x version %u.%u not found\n",
                                connection->protocol_id,
                                connection->major, connection->minor);
                return -EPROTONOSUPPORT;
        }
        connection->protocol = protocol;

        return 0;
}

static void gb_connection_unbind_protocol(struct gb_connection *connection)
{
        struct gb_protocol *protocol = connection->protocol;

        gb_protocol_put(protocol);

        connection->protocol = NULL;
}