greybus: connection: drop the svc quiescing operation
drivers/staging/greybus/connection.c
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"


static void gb_connection_kref_release(struct kref *kref);

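/*
 * gb_connections_lock protects the per-host-device and per-bundle
 * connection lists; gb_connection_mutex serialises connection create
 * and destroy.
 */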
static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);


/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return true;
        }

        return false;
}

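/*
 * Connection reference counting: the connection structure is freed by
 * gb_connection_kref_release() once the last reference is dropped.
 */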
static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);

        trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
        trace_gb_connection_put(connection);

        kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                        u8 *data, size_t length)
{
        struct gb_connection *connection;

        trace_gb_hd_in(hd);

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        trace_gb_connection_release(connection);

        kfree(connection);
}

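/*
 * Initialise the connection name as "hd_cport_id/intf_id:cport_id";
 * static connections (no remote interface) use 0 for both remote ids.
 */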
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                        "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                                struct gb_interface *intf,
                                struct gb_bundle *bundle, int cport_id,
                                gb_request_handler_t handler,
                                unsigned long flags)
{
        struct gb_connection *connection;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_cport_in_use(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        mutex_unlock(&gb_connection_mutex);

        trace_gb_connection_create(connection);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}

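/*
 * Constructor variants: static connections (such as the SVC connection)
 * have no remote interface, and control connections have no bundle.
 */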
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                                        gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                        GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                        GB_CONNECTION_FLAG_CONTROL |
                                        GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                                        gb_request_handler_t handler,
                                        unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
                flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                        handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                                        unsigned long flags)
{
        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

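/*
 * The host-device CPort callbacks used below are all optional; a
 * missing callback is treated as success.
 */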
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
                                        connection->flags);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
                                connection->name, ret);
        }
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_flush)
                return 0;

        ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

static int
gb_connection_hd_cport_features_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_features_enable)
                return 0;

        ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_features_disable)
                return;

        hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                                GB_SVC_CPORT_FLAG_E2EFC;
        }

        ret = gb_svc_connection_create(hd->svc,
                        hd->svc->ap_intf_id,
                        connection->hd_cport_id,
                        intf->interface_id,
                        connection->intf_cport_id,
                        cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

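/* Tear down the SVC route again; static connections have none. */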
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform the interface that a CPort has been connected. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

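/*
 * Inform the interface that a CPort is entering the disconnecting
 * state; errors are logged but otherwise ignored during teardown.
 */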
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnecting_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                                "%s: failed to send disconnecting: %d\n",
                                connection->name, ret);
        }
}

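/*
 * Inform the interface that a CPort has been disconnected. For a
 * control connection undergoing a mode switch, the mode-switch
 * operation is sent instead.
 */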
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        if (gb_connection_is_control(connection)) {
                if (connection->mode_switch) {
                        ret = gb_control_mode_switch_operation(control);
                        if (ret) {
                                /*
                                 * Allow mode switch to time out waiting for
                                 * mailbox event.
                                 */
                                return;
                        }
                }

                return;
        }

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

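/* Send a core ping request over the connection and wait for the response. */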
static int gb_connection_ping_operation(struct gb_connection *connection)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(connection,
                                                GB_REQUEST_TYPE_PING,
                                                0, 0, 0,
                                                GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);

        gb_operation_put(operation);

        return ret;
}

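/*
 * Ping the remote end of a connection. Offloaded connections, whose
 * traffic is handled outside of greybus core, are pinged through the
 * host driver instead.
 */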
static int gb_connection_ping(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_offloaded(connection)) {
                if (!hd->driver->cport_ping)
                        return 0;

                ret = hd->driver->cport_ping(hd, connection->intf_cport_id);
        } else {
                ret = gb_connection_ping_operation(connection);
        }

        if (ret) {
                dev_err(&hd->dev, "%s: failed to send ping: %d\n",
                                connection->name, ret);
                return ret;
        }

        return 0;
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                                int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                                struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                                int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                                                links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irq(&connection->lock);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irq(&connection->lock);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        ret = gb_connection_hd_cport_features_enable(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_control_disconnecting;

        return 0;

err_control_disconnecting:
        gb_connection_control_disconnecting(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_control_disconnected(connection);
        connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}

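/*
 * Enable a connection for both sending and receiving; enabling an
 * already fully enabled connection is a no-op.
 */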
int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

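/*
 * Enable a connection for sending only; fails with -EINVAL if the
 * connection has already been fully enabled.
 */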
int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

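/*
 * Stop accepting incoming requests while keeping the transmit side
 * enabled, flushing any incoming operations still in flight.
 */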
void gb_connection_disable_rx(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        trace_gb_connection_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

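/*
 * Mode-switch helpers: prepare makes gb_connection_disable() defer the
 * SVC-connection and host-cport teardown; complete performs the
 * deferred teardown once the mode switch has finished.
 */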
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
        connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);
        connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        gb_connection_control_disconnecting(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);

        gb_connection_control_disconnected(connection);

        connection->state = GB_CONNECTION_STATE_DISABLED;

        /* control-connection tear down is deferred when mode switching */
        if (!connection->mode_switch) {
                gb_connection_svc_connection_destroy(connection);
                gb_connection_hd_cport_disable(connection);
        }

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

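/*
 * Latency tagging is optional host-driver functionality; the calls
 * below are silently ignored when the driver does not implement it.
 */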
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);