greybus: hd: add flag argument to cport_enable callback
drivers/staging/greybus/connection.c
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return true;
        }

        return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);

        trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
        trace_gb_connection_put(connection);

        kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the given CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                        u8 *data, size_t length)
{
        struct gb_connection *connection;

        trace_gb_hd_in(hd);

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
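
/*
 * Illustrative sketch (not part of the original file): a host driver's
 * receive path hands each completed CPort message to the core through
 * greybus_data_rcvd(). The completion handler and message structure
 * below are hypothetical; only greybus_data_rcvd() is real.
 *
 *      static void example_rx_complete(struct example_hd_msg *msg)
 *      {
 *              greybus_data_rcvd(msg->hd, msg->cport_id,
 *                                msg->buf, msg->len);
 *      }
 */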

static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        trace_gb_connection_release(connection);

        kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                        "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/**
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                                struct gb_interface *intf,
                                struct gb_bundle *bundle, int cport_id,
                                gb_request_handler_t handler,
                                unsigned long flags)
{
        struct gb_connection *connection;
        unsigned long irqflags;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_cport_in_use(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irqsave(&gb_connections_lock, irqflags);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irqrestore(&gb_connections_lock, irqflags);

        mutex_unlock(&gb_connection_mutex);

        trace_gb_connection_create(connection);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                                        gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                        0);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                        GB_CONNECTION_FLAG_CONTROL);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler,
                                        unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                                        unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
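
/*
 * Illustrative sketch (not part of the original file): a bundle driver
 * normally creates its connection in probe() from a CPort descriptor in
 * the bundle. The handler and descriptor variable are hypothetical.
 *
 *      static int example_request_handler(struct gb_operation *op)
 *      {
 *              // dispatch on op->type, then send a response
 *              return 0;
 *      }
 *
 *      // in probe():
 *      connection = gb_connection_create(bundle,
 *                                      le16_to_cpu(cport_desc->id),
 *                                      example_request_handler);
 *      if (IS_ERR(connection))
 *              return PTR_ERR(connection);
 */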

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
                                        connection->flags);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
                                connection->name, ret);
        }
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_flush)
                return 0;

        ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

static int
gb_connection_hd_cport_features_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_features_enable)
                return 0;

        ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_features_disable)
                return;

        hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested:
         * CSV is always masked, CSD is left enabled by default, and E2EFC
         * replaces CSD when end-to-end flow control is supported.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                                GB_SVC_CPORT_FLAG_E2EFC;
        }

        ret = gb_svc_connection_create(hd->svc,
                        hd->svc->ap_intf_id,
                        connection->hd_cport_id,
                        intf->interface_id,
                        connection->intf_cport_id,
                        cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

static void
gb_connection_svc_connection_quiescing(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_quiescing(hd->svc,
                                        hd->svc->ap_intf_id,
                                        connection->hd_cport_id,
                                        connection->intf->interface_id,
                                        connection->intf_cport_id);
}

/* Inform the interface about active CPorts. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        /*
         * HACK: Suppress the connected request for the offloaded camera
         * connection as it is currently not supported by firmware. Note that
         * the corresponding non-fatal disconnected event is still sent.
         */
        if (gb_connection_is_offloaded(connection) &&
                        connection->flags & GB_CONNECTION_FLAG_CDSI1) {
                return 0;
        }

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnecting_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                                "%s: failed to send disconnecting: %d\n",
                                connection->name, ret);
        }
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        if (gb_connection_is_control(connection)) {
                if (connection->mode_switch) {
                        ret = gb_control_mode_switch_operation(control);
                        if (ret) {
                                /*
                                 * Allow the mode switch to time out waiting
                                 * for the mailbox event.
                                 */
                                return;
                        }
                }

                return;
        }

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

static int gb_connection_ping_operation(struct gb_connection *connection)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(connection,
                                                GB_REQUEST_TYPE_PING,
                                                0, 0, 0,
                                                GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);

        gb_operation_put(operation);

        return ret;
}

static int gb_connection_ping(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_offloaded(connection)) {
                if (!hd->driver->cport_ping)
                        return 0;

                ret = hd->driver->cport_ping(hd, connection->intf_cport_id);
        } else {
                ret = gb_connection_ping_operation(connection);
        }

        if (ret) {
                dev_err(&hd->dev, "%s: failed to send ping: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with the connection lock held and the state set to
 * DISABLED or DISCONNECTING. The lock is dropped and reacquired around
 * each cancellation.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                                int errno, unsigned long *flags)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                                struct gb_operation, links);
                /*
                 * Take a reference so the operation cannot go away while
                 * the lock is dropped for the (sleeping) cancellation.
                 */
                gb_operation_get(operation);
                spin_unlock_irqrestore(&connection->lock, *flags);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irqsave(&connection->lock, *flags);
        }
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with the connection lock held and the state set to
 * ENABLED_TX. The lock is dropped and reacquired around each cancellation.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                                int errno, unsigned long *flags)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                                                links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irqrestore(&connection->lock, *flags);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irqsave(&connection->lock, *flags);
        }
}

/**
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;
        unsigned long flags;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irqsave(&connection->lock, flags);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irqrestore(&connection->lock, flags);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        ret = gb_connection_hd_cport_features_enable(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irqsave(&connection->lock, flags);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irqrestore(&connection->lock, flags);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_control_disconnecting;

        return 0;

err_control_disconnecting:
        gb_connection_control_disconnecting(connection);

        spin_lock_irqsave(&connection->lock, flags);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
        spin_unlock_irqrestore(&connection->lock, flags);

        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_quiescing(connection);
        gb_connection_ping(connection);
        gb_connection_control_disconnected(connection);
        connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
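
/*
 * Illustrative sketch (not part of the original file): a typical
 * connection life cycle in a bundle driver pairs the helpers in this
 * file as follows (error handling elided, surrounding context
 * hypothetical).
 *
 *      // probe():
 *      connection = gb_connection_create(bundle, cport_id, handler);
 *      ret = gb_connection_enable(connection);
 *
 *      // remove():
 *      gb_connection_disable(connection);
 *      gb_connection_destroy(connection);
 */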

int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
        unsigned long flags;

        mutex_lock(&connection->mutex);

        spin_lock_irqsave(&connection->lock, flags);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irqrestore(&connection->lock, flags);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, &flags);
        spin_unlock_irqrestore(&connection->lock, flags);

        trace_gb_connection_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
        connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);
        connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
        unsigned long flags;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        gb_connection_control_disconnecting(connection);

        spin_lock_irqsave(&connection->lock, flags);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
        spin_unlock_irqrestore(&connection->lock, flags);

        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_quiescing(connection);
        gb_connection_ping(connection);

        gb_connection_control_disconnected(connection);

        connection->state = GB_CONNECTION_STATE_DISABLED;

        /* Control-connection tear down is deferred when mode switching. */
        if (!connection->mode_switch) {
                gb_connection_svc_connection_destroy(connection);
                gb_connection_hd_cport_disable(connection);
        }

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        unsigned long flags;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irqsave(&connection->lock, flags);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
        spin_unlock_irqrestore(&connection->lock, flags);

        gb_connection_hd_cport_flush(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        unsigned long flags;

        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
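
/*
 * Illustrative sketch (not part of the original file): latency tagging
 * is an optional host-driver feature. A driver measuring round-trip
 * latency could bracket its traffic like this; the operation type and
 * request variable are hypothetical.
 *
 *      gb_connection_latency_tag_enable(connection);
 *      ret = gb_operation_sync(connection, EXAMPLE_TYPE_XFER,
 *                              req, sizeof(*req), NULL, 0);
 *      gb_connection_latency_tag_disable(connection);
 */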