greybus: Revert "connection: switch to using spin_lock_irqsave/spin_lock_irqrestore...
[cascardo/linux.git] drivers/staging/greybus/connection.c
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"


static void gb_connection_kref_release(struct kref *kref);


static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);


/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
				connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a cport.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
			u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
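
/*
 * Example (illustrative): a host-device driver calls greybus_data_rcvd()
 * from its receive path once a complete message has arrived on a cport.
 * A minimal sketch, assuming a hypothetical RX-completion handler:
 *
 *	static void example_rx_complete(struct gb_host_device *hd,
 *					u16 cport_id, u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 *
 * The core takes its own reference on the connection while delivering
 * the data, so the caller need only keep @buf valid for the duration of
 * the call.
 */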

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
			"%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
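
/*
 * The name encodes "<hd_cport_id>/<intf_id>:<cport_id>". For example, a
 * connection on host cport 4 to cport 2 of interface 1 is named "4/1:2",
 * while static connections (which have no interface) always end in
 * "0:0".
 */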

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
				struct gb_interface *intf,
				struct gb_bundle *bundle, int cport_id,
				gb_request_handler_t handler,
				unsigned long flags)
{
	struct gb_connection *connection;
	unsigned long irqflags;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irqsave(&gb_connections_lock, irqflags);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irqrestore(&gb_connections_lock, irqflags);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
					gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
					GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
					GB_CONNECTION_FLAG_CONTROL |
					GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
					gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
					handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
					gb_request_handler_t handler,
					unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
					handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
					unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
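
/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * bundle driver typically creates and enables its connection in probe
 * and tears it down in reverse order in remove; the request-handler
 * name below is a hypothetical placeholder:
 *
 *	connection = gb_connection_create(bundle, cport_id,
 *						example_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 *
 *	...
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */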

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
					connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
				connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
				connection->name, ret);
	}
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
				connection->name, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_hd_cport_features_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_features_enable)
		return 0;

	ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	if (!hd->driver->cport_features_disable)
		return;

	hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
			hd->svc->ap_intf_id,
			connection->hd_cport_id,
			intf->interface_id,
			connection->intf_cport_id,
			cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
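
/*
 * Summary of the cport flags requested above (derived from the code):
 * CSV is always disabled (CSV_N). With flow control disabled, CSD is
 * disabled too (CSD_N) and E2EFC is not used; with E2EFC enabled, CSD
 * is disabled in favour of E2EFC; otherwise CSD alone provides flow
 * control.
 */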

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

static void
gb_connection_svc_connection_quiescing(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_quiescing(hd->svc,
					hd->svc->ap_intf_id,
					connection->hd_cport_id,
					connection->intf->interface_id,
					connection->intf_cport_id);
}

/* Inform the interface about active CPorts. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	/*
	 * HACK: Suppress connected request for the offloaded camera
	 * connection as it is currently not supported by firmware. Note that
	 * the corresponding non-fatal disconnected event is still sent.
	 */
	if (gb_connection_is_offloaded(connection) &&
			connection->flags & GB_CONNECTION_FLAG_CDSI1) {
		return 0;
	}

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
				"%s: failed to send disconnecting: %d\n",
				connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_ping_operation(struct gb_connection *connection)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
						GB_REQUEST_TYPE_PING,
						0, 0, 0,
						GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_ping(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!hd->driver->cport_ping)
			return 0;

		ret = hd->driver->cport_ping(hd, connection->intf_cport_id);
	} else {
		ret = gb_connection_ping_operation(connection);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send ping: %d\n",
				connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
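
/*
 * Note the pattern above: operations are consumed from the tail of the
 * list, and connection->lock is dropped around the potentially sleeping
 * cancel calls; the extra reference taken via gb_operation_get() keeps
 * each operation alive across the unlocked window.
 */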

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
						int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
								links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	ret = gb_connection_hd_cport_features_enable(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	gb_connection_control_disconnecting(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_ping(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_quiescing(connection);
	gb_connection_ping(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
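
/*
 * The error unwind above mirrors the tear-down sequence performed by
 * gb_connection_disable() below.
 */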

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);
	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	gb_connection_control_disconnecting(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_ping(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_quiescing(connection);
	gb_connection_ping(connection);

	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);
	gb_connection_hd_cport_features_disable(connection);
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
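
/*
 * Summary of the state transitions driven by the enable/disable calls
 * above (derived from the code in this file):
 *
 *	DISABLED   -> ENABLED	  gb_connection_enable()
 *	DISABLED   -> ENABLED_TX  gb_connection_enable_tx()
 *	ENABLED_TX -> ENABLED	  gb_connection_enable()
 *	ENABLED    -> ENABLED_TX  gb_connection_disable_rx()
 *	enabled    -> DISABLED	  gb_connection_disable{,_forced}()
 */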

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	unsigned long flags;

	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);