/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);
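/*
 * gb_connections_lock (above) protects the host-device and bundle connection
 * lists; gb_connection_mutex serialises connection creation and destruction
 * against the cport-in-use lookup below.
 */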
/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                    connection->intf_cport_id == cport_id)
                        return true;
        }

        return false;
}
static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);

        trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
        trace_gb_connection_put(connection);

        kref_put(&connection->kref, gb_connection_kref_release);
}
/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                       u8 *data, size_t length)
{
        struct gb_connection *connection;

        trace_gb_hd_in(hd);

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        trace_gb_connection_release(connection);

        kfree(connection);
}
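/* Build the "<hd_cport>/<intf>:<cport>" name used in connection log messages. */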
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
/**
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                      struct gb_interface *intf,
                      struct gb_bundle *bundle, int cport_id,
                      gb_request_handler_t handler,
                      unsigned long flags)
{
        struct gb_connection *connection;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_cport_in_use(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        mutex_unlock(&gb_connection_mutex);

        trace_gb_connection_create(connection);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                            gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                     GB_CONNECTION_FLAG_CONTROL |
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                     gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
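/*
 * Typical bundle-driver use of the create/enable API above, as an illustrative
 * sketch only (the handler name and surrounding driver code are hypothetical):
 *
 *	connection = gb_connection_create(bundle, cport_id, my_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 *
 *	...
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */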
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                           gb_request_handler_t handler,
                           unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
                flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                               unsigned long flags)
{
        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
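/*
 * Host-device CPort helpers: thin wrappers around the optional hd->driver
 * callbacks, logging any failure against the connection name.
 */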
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
                                       connection->flags);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
                        connection->name, ret);
        }
}
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_flush)
                return 0;

        ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int
gb_connection_hd_cport_features_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_features_enable)
                return 0;

        ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}
static void
gb_connection_hd_cport_features_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        if (!hd->driver->cport_features_disable)
                return;

        hd->driver->cport_features_disable(hd, connection->hd_cport_id);
}
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                               GB_SVC_CPORT_FLAG_E2EFC;
        }

        ret = gb_svc_connection_create(hd->svc,
                                       hd->svc->ap_intf_id,
                                       connection->hd_cport_id,
                                       intf->interface_id,
                                       connection->intf_cport_id,
                                       cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnecting_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to send disconnecting: %d\n",
                        connection->name, ret);
        }
}
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        if (gb_connection_is_control(connection)) {
                if (connection->mode_switch) {
                        ret = gb_control_mode_switch_operation(control);
                        if (ret) {
                                /*
                                 * Allow mode switch to time out waiting for
                                 * the mailbox event.
                                 */
                                return;
                        }
                }

                return;
        }

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}
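/*
 * Send a core ping request on the connection; used by the disable paths
 * below after flushing the host CPort.
 */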
static int gb_connection_ping_operation(struct gb_connection *connection)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(connection,
                                             GB_REQUEST_TYPE_PING,
                                             0, 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);

        gb_operation_put(operation);

        return ret;
}
static int gb_connection_ping(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_offloaded(connection)) {
                if (!hd->driver->cport_ping)
                        return 0;

                ret = hd->driver->cport_ping(hd, connection->intf_cport_id);
        } else {
                ret = gb_connection_ping_operation(connection);
        }

        if (ret) {
                dev_err(&hd->dev, "%s: failed to send ping: %d\n",
                        connection->name, ret);
        }

        return ret;
}
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                            int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                            struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                        int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                    links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irq(&connection->lock);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}
/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irq(&connection->lock);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_disable;

        ret = gb_connection_hd_cport_features_enable(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_control_disconnecting;

        return 0;

err_control_disconnecting:
        gb_connection_control_disconnecting(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_control_disconnected(connection);
        connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
        gb_connection_hd_cport_disable(connection);

        return ret;
}
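/*
 * Enable a connection for both outgoing and, if a handler is registered,
 * incoming requests.
 */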
int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
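/*
 * Enable a connection for outgoing requests only (ENABLED_TX); incoming
 * requests are not accepted until gb_connection_enable() is called. Fails
 * with -EINVAL if the connection is already fully enabled.
 */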
int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        trace_gb_connection_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
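/*
 * Mode-switch support: a control connection marked with
 * gb_connection_mode_switch_prepare() has its SVC-connection and host-CPort
 * tear-down deferred by gb_connection_disable() until
 * gb_connection_mode_switch_complete() is called.
 */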
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
        connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);
        connection->mode_switch = false;
}
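/*
 * Disable an enabled connection: notify the remote end (disconnecting),
 * cancel outstanding operations, flush and ping the host CPort, and finally
 * tear down the SVC connection and host CPort unless a mode switch is in
 * progress.
 */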
void gb_connection_disable(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        gb_connection_control_disconnecting(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_ping(connection);
        gb_connection_hd_cport_features_disable(connection);

        gb_connection_control_disconnected(connection);

        connection->state = GB_CONNECTION_STATE_DISABLED;

        /* control-connection tear down is deferred when mode switching */
        if (!connection->mode_switch) {
                gb_connection_svc_connection_destroy(connection);
                gb_connection_hd_cport_disable(connection);
        }

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);
        gb_connection_hd_cport_features_disable(connection);
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
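/*
 * Optional host-driver hooks for tagging traffic on a CPort (e.g. for
 * latency measurements); silently skipped when the host driver does not
 * implement them.
 */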
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);