greybus: connection: add support for high-priority connections
[cascardo/linux.git] / drivers / staging / greybus / connection.c
1 /*
2  * Greybus connections
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13 #include "greybus_trace.h"
14
15
16 static void gb_connection_kref_release(struct kref *kref);
17
18
19 static DEFINE_SPINLOCK(gb_connections_lock);
20 static DEFINE_MUTEX(gb_connection_mutex);
21
22
23 /* Caller holds gb_connection_mutex. */
24 static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
25 {
26         struct gb_host_device *hd = intf->hd;
27         struct gb_connection *connection;
28
29         list_for_each_entry(connection, &hd->connections, hd_links) {
30                 if (connection->intf == intf &&
31                                 connection->intf_cport_id == cport_id)
32                         return true;
33         }
34
35         return false;
36 }
37
38 static void gb_connection_get(struct gb_connection *connection)
39 {
40         kref_get(&connection->kref);
41
42         trace_gb_connection_get(connection);
43 }
44
45 static void gb_connection_put(struct gb_connection *connection)
46 {
47         trace_gb_connection_put(connection);
48
49         kref_put(&connection->kref, gb_connection_kref_release);
50 }
51
52 /*
53  * Returns a reference-counted pointer to the connection if found.
54  */
55 static struct gb_connection *
56 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
57 {
58         struct gb_connection *connection;
59         unsigned long flags;
60
61         spin_lock_irqsave(&gb_connections_lock, flags);
62         list_for_each_entry(connection, &hd->connections, hd_links)
63                 if (connection->hd_cport_id == cport_id) {
64                         gb_connection_get(connection);
65                         goto found;
66                 }
67         connection = NULL;
68 found:
69         spin_unlock_irqrestore(&gb_connections_lock, flags);
70
71         return connection;
72 }
73
74 /*
75  * Callback from the host driver to let us know that data has been
76  * received on the bundle.
77  */
78 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
79                         u8 *data, size_t length)
80 {
81         struct gb_connection *connection;
82
83         trace_gb_hd_in(hd);
84
85         connection = gb_connection_hd_find(hd, cport_id);
86         if (!connection) {
87                 dev_err(&hd->dev,
88                         "nonexistent connection (%zu bytes dropped)\n", length);
89                 return;
90         }
91         gb_connection_recv(connection, data, length);
92         gb_connection_put(connection);
93 }
94 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
95
96 static void gb_connection_kref_release(struct kref *kref)
97 {
98         struct gb_connection *connection;
99
100         connection = container_of(kref, struct gb_connection, kref);
101
102         trace_gb_connection_release(connection);
103
104         kfree(connection);
105 }
106
107 static void gb_connection_init_name(struct gb_connection *connection)
108 {
109         u16 hd_cport_id = connection->hd_cport_id;
110         u16 cport_id = 0;
111         u8 intf_id = 0;
112
113         if (connection->intf) {
114                 intf_id = connection->intf->interface_id;
115                 cport_id = connection->intf_cport_id;
116         }
117
118         snprintf(connection->name, sizeof(connection->name),
119                         "%u/%u:%u", hd_cport_id, intf_id, cport_id);
120 }
121
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:                 host device of the connection
 * @hd_cport_id:        host-device cport id, or -1 for dynamic allocation
 * @intf:               remote interface, or NULL for static connections
 * @bundle:             remote-interface bundle (may be NULL)
 * @cport_id:           remote-interface cport id, or 0 for static connections
 * @handler:            request handler (may be NULL)
 * @flags:              connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler,
				unsigned long flags)
{
	struct gb_connection *connection;
	unsigned long irqflags;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* Reject a second connection to an already-connected remote cport. */
	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* On success returns the allocated cport id (relevant when -1 passed). */
	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	/* Interfaces without CPort features cannot do flow control. */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Per-connection workqueue; max_active of 1 keeps work serialised. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the host-device (and bundle) lists. */
	spin_lock_irqsave(&gb_connections_lock, irqflags);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irqrestore(&gb_connections_lock, irqflags);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
229
230 struct gb_connection *
231 gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
232                                         gb_request_handler_t handler)
233 {
234         return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
235                                         GB_CONNECTION_FLAG_HIGH_PRIO);
236 }
237
238 struct gb_connection *
239 gb_connection_create_control(struct gb_interface *intf)
240 {
241         return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
242                                         GB_CONNECTION_FLAG_CONTROL |
243                                         GB_CONNECTION_FLAG_HIGH_PRIO);
244 }
245
246 struct gb_connection *
247 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
248                                         gb_request_handler_t handler)
249 {
250         struct gb_interface *intf = bundle->intf;
251
252         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
253                                         handler, 0);
254 }
255 EXPORT_SYMBOL_GPL(gb_connection_create);
256
257 struct gb_connection *
258 gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
259                                         gb_request_handler_t handler,
260                                         unsigned long flags)
261 {
262         struct gb_interface *intf = bundle->intf;
263
264         if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
265                 flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
266
267         return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
268                                         handler, flags);
269 }
270 EXPORT_SYMBOL_GPL(gb_connection_create_flags);
271
272 struct gb_connection *
273 gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
274                                         unsigned long flags)
275 {
276         flags |= GB_CONNECTION_FLAG_OFFLOADED;
277
278         return gb_connection_create_flags(bundle, cport_id, NULL, flags);
279 }
280 EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
281
282 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
283 {
284         struct gb_host_device *hd = connection->hd;
285         int ret;
286
287         if (!hd->driver->cport_enable)
288                 return 0;
289
290         ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
291                                         connection->flags);
292         if (ret) {
293                 dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
294                                 connection->name, ret);
295                 return ret;
296         }
297
298         return 0;
299 }
300
301 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
302 {
303         struct gb_host_device *hd = connection->hd;
304         int ret;
305
306         if (!hd->driver->cport_disable)
307                 return;
308
309         ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
310         if (ret) {
311                 dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
312                                 connection->name, ret);
313         }
314 }
315
316 static int gb_connection_hd_cport_flush(struct gb_connection *connection)
317 {
318         struct gb_host_device *hd = connection->hd;
319         int ret;
320
321         if (!hd->driver->cport_flush)
322                 return 0;
323
324         ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
325         if (ret) {
326                 dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
327                                 connection->name, ret);
328                 return ret;
329         }
330
331         return 0;
332 }
333
334 static int
335 gb_connection_hd_cport_features_enable(struct gb_connection *connection)
336 {
337         struct gb_host_device *hd = connection->hd;
338         int ret;
339
340         if (!hd->driver->cport_features_enable)
341                 return 0;
342
343         ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
344         if (ret) {
345                 dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
346                         connection->name, ret);
347                 return ret;
348         }
349
350         return 0;
351 }
352
353 static void
354 gb_connection_hd_cport_features_disable(struct gb_connection *connection)
355 {
356         struct gb_host_device *hd = connection->hd;
357
358         if (!hd->driver->cport_features_disable)
359                 return;
360
361         hd->driver->cport_features_disable(hd, connection->hd_cport_id);
362 }
363
364 /*
365  * Request the SVC to create a connection from AP's cport to interface's
366  * cport.
367  */
368 static int
369 gb_connection_svc_connection_create(struct gb_connection *connection)
370 {
371         struct gb_host_device *hd = connection->hd;
372         struct gb_interface *intf;
373         u8 cport_flags;
374         int ret;
375
376         if (gb_connection_is_static(connection))
377                 return 0;
378
379         intf = connection->intf;
380
381         /*
382          * Enable either E2EFC or CSD, unless no flow control is requested.
383          */
384         cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
385         if (gb_connection_flow_control_disabled(connection)) {
386                 cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
387         } else if (gb_connection_e2efc_enabled(connection)) {
388                 cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
389                                 GB_SVC_CPORT_FLAG_E2EFC;
390         }
391
392         ret = gb_svc_connection_create(hd->svc,
393                         hd->svc->ap_intf_id,
394                         connection->hd_cport_id,
395                         intf->interface_id,
396                         connection->intf_cport_id,
397                         cport_flags);
398         if (ret) {
399                 dev_err(&connection->hd->dev,
400                         "%s: failed to create svc connection: %d\n",
401                         connection->name, ret);
402                 return ret;
403         }
404
405         return 0;
406 }
407
408 static void
409 gb_connection_svc_connection_destroy(struct gb_connection *connection)
410 {
411         if (gb_connection_is_static(connection))
412                 return;
413
414         gb_svc_connection_destroy(connection->hd->svc,
415                                   connection->hd->svc->ap_intf_id,
416                                   connection->hd_cport_id,
417                                   connection->intf->interface_id,
418                                   connection->intf_cport_id);
419 }
420
421 static void
422 gb_connection_svc_connection_quiescing(struct gb_connection *connection)
423 {
424         struct gb_host_device *hd = connection->hd;
425
426         if (gb_connection_is_static(connection))
427                 return;
428
429         gb_svc_connection_quiescing(hd->svc,
430                                         hd->svc->ap_intf_id,
431                                         connection->hd_cport_id,
432                                         connection->intf->interface_id,
433                                         connection->intf_cport_id);
434 }
435
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	/* Static connections have no remote interface to inform. */
	if (gb_connection_is_static(connection))
		return 0;

	/*
	 * HACK: Suppress connected request for the offloaded camera
	 * connection as it is currently not supported by firmware. Note that
	 * the corresponding non-fatal disconnected event is still sent.
	 */
	if (gb_connection_is_offloaded(connection) &&
			connection->flags & GB_CONNECTION_FLAG_CDSI1) {
		return 0;
	}

	/* The control connection itself needs no connected request. */
	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}
470
471 static void
472 gb_connection_control_disconnecting(struct gb_connection *connection)
473 {
474         struct gb_control *control;
475         u16 cport_id = connection->intf_cport_id;
476         int ret;
477
478         if (gb_connection_is_static(connection))
479                 return;
480
481         control = connection->intf->control;
482
483         ret = gb_control_disconnecting_operation(control, cport_id);
484         if (ret) {
485                 dev_err(&connection->hd->dev,
486                                 "%s: failed to send disconnecting: %d\n",
487                                 connection->name, ret);
488         }
489 }
490
/* Inform the remote interface that the cport has been disconnected. */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		/*
		 * A control connection in mode switch sends a mode-switch
		 * operation instead of a disconnected event.
		 */
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		/* Non-fatal: just warn and continue tearing down. */
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
524
525 static int gb_connection_ping_operation(struct gb_connection *connection)
526 {
527         struct gb_operation *operation;
528         int ret;
529
530         operation = gb_operation_create_core(connection,
531                                                 GB_REQUEST_TYPE_PING,
532                                                 0, 0, 0,
533                                                 GFP_KERNEL);
534         if (!operation)
535                 return -ENOMEM;
536
537         ret = gb_operation_request_send_sync(operation);
538
539         gb_operation_put(operation);
540
541         return ret;
542 }
543
544 static int gb_connection_ping(struct gb_connection *connection)
545 {
546         struct gb_host_device *hd = connection->hd;
547         int ret;
548
549         if (gb_connection_is_static(connection))
550                 return 0;
551
552         if (gb_connection_is_offloaded(connection)) {
553                 if (!hd->driver->cport_ping)
554                         return 0;
555
556                 ret = hd->driver->cport_ping(hd, connection->intf_cport_id);
557         } else {
558                 ret = gb_connection_ping_operation(connection);
559         }
560
561         if (ret) {
562                 dev_err(&hd->dev, "%s: failed to send ping: %d\n",
563                                 connection->name, ret);
564                 return ret;
565         }
566
567         return 0;
568 }
569
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING. The lock is dropped and reacquired around each cancel;
 * @flags is updated in place for the caller's final unlock.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno, unsigned long *flags)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/* Hold a reference so the operation survives the unlock. */
		gb_operation_get(operation);
		spin_unlock_irqrestore(&connection->lock, *flags);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irqsave(&connection->lock, *flags);
	}
}
598
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 * The lock is dropped and reacquired around each cancel; @flags is
 * updated in place for the caller's final unlock.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno, unsigned long *flags)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		/* Find the next incoming operation, if any remain. */
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				/* Pin it across the upcoming unlock. */
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irqrestore(&connection->lock, *flags);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irqsave(&connection->lock, *flags);
	}
}
635
/*
 * _gb_connection_enable() - enable a connection
 * @connection:         connection to enable
 * @rx:                 whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;
	unsigned long flags;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irqsave(&connection->lock, flags);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irqrestore(&connection->lock, flags);

		return 0;
	}

	/* Bring-up order: host cport, SVC connection, CPort features. */
	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	ret = gb_connection_hd_cport_features_enable(connection);
	if (ret)
		goto err_svc_connection_destroy;

	/* RX is only enabled when a request handler is installed. */
	spin_lock_irqsave(&connection->lock, flags);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irqrestore(&connection->lock, flags);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	/* Same ordered tear-down sequence as gb_connection_disable(). */
	gb_connection_control_disconnecting(connection);

	spin_lock_irqsave(&connection->lock, flags);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
	spin_unlock_irqrestore(&connection->lock, flags);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_ping(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_quiescing(connection);
	gb_connection_ping(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
712
713 int gb_connection_enable(struct gb_connection *connection)
714 {
715         int ret = 0;
716
717         mutex_lock(&connection->mutex);
718
719         if (connection->state == GB_CONNECTION_STATE_ENABLED)
720                 goto out_unlock;
721
722         ret = _gb_connection_enable(connection, true);
723         if (!ret)
724                 trace_gb_connection_enable(connection);
725
726 out_unlock:
727         mutex_unlock(&connection->mutex);
728
729         return ret;
730 }
731 EXPORT_SYMBOL_GPL(gb_connection_enable);
732
733 int gb_connection_enable_tx(struct gb_connection *connection)
734 {
735         int ret = 0;
736
737         mutex_lock(&connection->mutex);
738
739         if (connection->state == GB_CONNECTION_STATE_ENABLED) {
740                 ret = -EINVAL;
741                 goto out_unlock;
742         }
743
744         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
745                 goto out_unlock;
746
747         ret = _gb_connection_enable(connection, false);
748         if (!ret)
749                 trace_gb_connection_enable(connection);
750
751 out_unlock:
752         mutex_unlock(&connection->mutex);
753
754         return ret;
755 }
756 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
757
/* Stop accepting incoming requests while keeping transmit enabled. */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	unsigned long flags;

	mutex_lock(&connection->mutex);

	spin_lock_irqsave(&connection->lock, flags);
	/* Only an ENABLED connection can transition to ENABLED_TX. */
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irqrestore(&connection->lock, flags);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Drops and reacquires connection->lock; flags updated in place. */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, &flags);
	spin_unlock_irqrestore(&connection->lock, flags);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
779
780 void gb_connection_mode_switch_prepare(struct gb_connection *connection)
781 {
782         connection->mode_switch = true;
783 }
784
785 void gb_connection_mode_switch_complete(struct gb_connection *connection)
786 {
787         gb_connection_svc_connection_destroy(connection);
788         gb_connection_hd_cport_disable(connection);
789         connection->mode_switch = false;
790 }
791
/* Disable a connection, notifying the remote end before tearing down. */
void gb_connection_disable(struct gb_connection *connection)
{
	unsigned long flags;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	gb_connection_control_disconnecting(connection);

	spin_lock_irqsave(&connection->lock, flags);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	/* Drops and reacquires connection->lock; flags updated in place. */
	gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
	spin_unlock_irqrestore(&connection->lock, flags);

	gb_connection_hd_cport_flush(connection);

	/* Pings bracket the features-disable/quiescing sequence. */
	gb_connection_ping(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_quiescing(connection);
	gb_connection_ping(connection);

	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
831
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	unsigned long flags;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irqsave(&connection->lock, flags);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	/* Drops and reacquires connection->lock; flags updated in place. */
	gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
	spin_unlock_irqrestore(&connection->lock, flags);

	/* Unlike gb_connection_disable(): no disconnecting handshake, no pings. */
	gb_connection_hd_cport_flush(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
858
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	unsigned long flags;

	if (!connection)
		return;

	/* Last-resort disable; callers should have done this already. */
	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	/* Unpublish from the host-device and bundle lists. */
	spin_lock_irqsave(&gb_connections_lock, flags);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the kref_init() reference; frees the connection on final put. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
887
888 void gb_connection_latency_tag_enable(struct gb_connection *connection)
889 {
890         struct gb_host_device *hd = connection->hd;
891         int ret;
892
893         if (!hd->driver->latency_tag_enable)
894                 return;
895
896         ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
897         if (ret) {
898                 dev_err(&connection->hd->dev,
899                         "%s: failed to enable latency tag: %d\n",
900                         connection->name, ret);
901         }
902 }
903 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
904
905 void gb_connection_latency_tag_disable(struct gb_connection *connection)
906 {
907         struct gb_host_device *hd = connection->hd;
908         int ret;
909
910         if (!hd->driver->latency_tag_disable)
911                 return;
912
913         ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
914         if (ret) {
915                 dev_err(&connection->hd->dev,
916                         "%s: failed to disable latency tag: %d\n",
917                         connection->name, ret);
918         }
919 }
920 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);