#include "greybus.h"
-static int gb_connection_bind_protocol(struct gb_connection *connection);
-static void gb_connection_unbind_protocol(struct gb_connection *connection);
+static void gb_connection_kref_release(struct kref *kref);
static DEFINE_SPINLOCK(gb_connections_lock);
+static DEFINE_MUTEX(gb_connection_mutex);
-/* This is only used at initialization time; no locking is required. */
+
+/* Caller holds gb_connection_mutex. */
static struct gb_connection *
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
{
return NULL;
}
+/* Take a reference on @connection; dropped via gb_connection_put(). */
+static void gb_connection_get(struct gb_connection *connection)
+{
+ kref_get(&connection->kref);
+}
+
+/* Drop a reference; the last put frees it via gb_connection_kref_release(). */
+static void gb_connection_put(struct gb_connection *connection)
+{
+ kref_put(&connection->kref, gb_connection_kref_release);
+}
+
+/*
+ * Returns a reference-counted pointer to the connection if found,
+ * or NULL otherwise.
+ */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
spin_lock_irqsave(&gb_connections_lock, flags);
list_for_each_entry(connection, &hd->connections, hd_links)
- if (connection->hd_cport_id == cport_id)
+ if (connection->hd_cport_id == cport_id) {
+ gb_connection_get(connection);
goto found;
+ }
connection = NULL;
found:
spin_unlock_irqrestore(&gb_connections_lock, flags);
return;
}
gb_connection_recv(connection, data, length);
+ gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
-static DEFINE_MUTEX(connection_mutex);
-
static void gb_connection_kref_release(struct kref *kref)
{
struct gb_connection *connection;
connection = container_of(kref, struct gb_connection, kref);
- destroy_workqueue(connection->wq);
+
kfree(connection);
- mutex_unlock(&connection_mutex);
}
static void gb_connection_init_name(struct gb_connection *connection)
}
/*
- * gb_connection_create() - create a Greybus connection
+ * _gb_connection_create() - create a Greybus connection
* @hd: host device of the connection
* @hd_cport_id: host-device cport id, or -1 for dynamic allocation
* @intf: remote interface, or NULL for static connections
* @bundle: remote-interface bundle (may be NULL)
* @cport_id: remote-interface cport id, or 0 for static connections
- * @protocol_id: protocol id
+ * @handler: request handler (may be NULL)
+ * @flags: connection flags
*
* Create a Greybus connection, representing the bidirectional link
* between a CPort on a (local) Greybus host device and a CPort on
* A connection also maintains the state of operations sent over the
* connection.
*
- * Return: A pointer to the new connection if successful, or NULL otherwise.
+ * Serialised against concurrent create and destroy using the
+ * gb_connection_mutex.
+ *
+ * Return: A pointer to the new connection if successful, or an ERR_PTR
+ * otherwise.
*/
static struct gb_connection *
-gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
+_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
struct gb_interface *intf,
struct gb_bundle *bundle, int cport_id,
- u8 protocol_id)
+ gb_request_handler_t handler,
+ unsigned long flags)
{
struct gb_connection *connection;
- struct ida *id_map = &hd->cport_id_map;
- int ida_start, ida_end;
- u8 major = 0;
- u8 minor = 1;
+ int ret;
- /*
- * If a manifest tries to reuse a cport, reject it. We
- * initialize connections serially so we don't need to worry
- * about holding the connection lock.
- */
- if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
- dev_err(&bundle->dev, "cport %u already connected\n",
- cport_id);
- return NULL;
- }
+ mutex_lock(&gb_connection_mutex);
- if (hd_cport_id < 0) {
- ida_start = 0;
- ida_end = hd->num_cports;
- } else if (hd_cport_id < hd->num_cports) {
- ida_start = hd_cport_id;
- ida_end = hd_cport_id + 1;
- } else {
- dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
- return NULL;
+ if (intf && gb_connection_intf_find(intf, cport_id)) {
+ dev_err(&intf->dev, "cport %u already in use\n", cport_id);
+ ret = -EBUSY;
+ goto err_unlock;
}
- hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
- if (hd_cport_id < 0)
- return NULL;
+ ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
+ if (ret < 0) {
+ dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
+ goto err_unlock;
+ }
+ hd_cport_id = ret;
connection = kzalloc(sizeof(*connection), GFP_KERNEL);
- if (!connection)
- goto err_remove_ida;
+ if (!connection) {
+ ret = -ENOMEM;
+ goto err_hd_cport_release;
+ }
connection->hd_cport_id = hd_cport_id;
connection->intf_cport_id = cport_id;
connection->hd = hd;
connection->intf = intf;
-
- connection->protocol_id = protocol_id;
- connection->major = major;
- connection->minor = minor;
-
connection->bundle = bundle;
+ connection->handler = handler;
+ connection->flags = flags;
+ if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
+ connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
connection->state = GB_CONNECTION_STATE_DISABLED;
atomic_set(&connection->op_cycle, 0);
connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
dev_name(&hd->dev), hd_cport_id);
- if (!connection->wq)
+ if (!connection->wq) {
+ ret = -ENOMEM;
goto err_free_connection;
+ }
kref_init(&connection->kref);
spin_unlock_irq(&gb_connections_lock);
+ mutex_unlock(&gb_connection_mutex);
+
return connection;
err_free_connection:
kfree(connection);
-err_remove_ida:
- ida_simple_remove(id_map, hd_cport_id);
+err_hd_cport_release:
+ gb_hd_cport_release(hd, hd_cport_id);
+err_unlock:
+ mutex_unlock(&gb_connection_mutex);
- return NULL;
+ return ERR_PTR(ret);
+}
+
+/*
+ * Create a static connection (no remote interface or bundle) on the
+ * fixed host cport @hd_cport_id, with @handler servicing incoming
+ * requests.
+ */
+struct gb_connection *
+gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
+ gb_request_handler_t handler)
+{
+ return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
+ 0);
+}
+
+/*
+ * Create the control connection for @intf: dynamically allocated host
+ * cport, no bundle, remote cport 0, and no request handler.
+ */
+struct gb_connection *
+gb_connection_create_control(struct gb_interface *intf)
+{
+ return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, 0);
+}
+
+struct gb_connection *
+gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
+ gb_request_handler_t handler)
+{
+ struct gb_interface *intf = bundle->intf;
+
+ return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+ handler, 0);
}
+EXPORT_SYMBOL_GPL(gb_connection_create);
struct gb_connection *
-gb_connection_create_static(struct gb_host_device *hd,
- u16 hd_cport_id, u8 protocol_id)
+gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
+ gb_request_handler_t handler,
+ unsigned long flags)
{
- return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
- protocol_id);
+ struct gb_interface *intf = bundle->intf;
+
+ return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+ handler, flags);
}
+EXPORT_SYMBOL_GPL(gb_connection_create_flags);
struct gb_connection *
-gb_connection_create_dynamic(struct gb_interface *intf,
- struct gb_bundle *bundle,
- u16 cport_id, u8 protocol_id)
+gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
+ unsigned long flags)
{
- return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
- protocol_id);
+ struct gb_interface *intf = bundle->intf;
+
+ flags |= GB_CONNECTION_FLAG_OFFLOADED;
+
+ return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+ NULL, flags);
}
+EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
hd->driver->cport_disable(hd, connection->hd_cport_id);
}
+/*
+ * Enable optional host-controller CPort features for this connection.
+ * Returns 0 when the host driver does not implement the callback, or
+ * the driver's error code on failure.
+ */
+static int
+gb_connection_hd_cport_features_enable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_features_enable)
+ return 0;
+
+ ret = hd->driver->cport_features_enable(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to enable CPort features: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Disable optional host-controller CPort features, if implemented. */
+static void
+gb_connection_hd_cport_features_disable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+
+ if (!hd->driver->cport_features_disable)
+ return;
+
+ hd->driver->cport_features_disable(hd, connection->hd_cport_id);
+}
+
/*
* Request the SVC to create a connection from AP's cport to interface's
* cport.
{
struct gb_host_device *hd = connection->hd;
struct gb_interface *intf;
+ u8 cport_flags;
int ret;
if (gb_connection_is_static(connection))
- return 0;
+ return gb_connection_hd_cport_features_enable(connection);
intf = connection->intf;
+
+ /*
+ * Enable either E2EFC or CSD, unless no flow control is requested.
+ */
+ cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
+ if (gb_connection_flow_control_disabled(connection)) {
+ cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
+ } else if (gb_connection_e2efc_enabled(connection)) {
+ cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
+ GB_SVC_CPORT_FLAG_E2EFC;
+ }
+
ret = gb_svc_connection_create(hd->svc,
hd->svc->ap_intf_id,
connection->hd_cport_id,
intf->interface_id,
connection->intf_cport_id,
- intf->boot_over_unipro);
+ cport_flags);
if (ret) {
dev_err(&connection->hd->dev,
"%s: failed to create svc connection: %d\n",
return ret;
}
+ ret = gb_connection_hd_cport_features_enable(connection);
+ if (ret) {
+ gb_svc_connection_destroy(hd->svc, hd->svc->ap_intf_id,
+ connection->hd_cport_id,
+ intf->interface_id,
+ connection->intf_cport_id);
+ return ret;
+ }
+
return 0;
}
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
+ gb_connection_hd_cport_features_disable(connection);
+
if (gb_connection_is_static(connection))
return;
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
- struct gb_protocol *protocol = connection->protocol;
struct gb_control *control;
u16 cport_id = connection->intf_cport_id;
int ret;
- if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
+ if (gb_connection_is_static(connection))
return 0;
- control = connection->bundle->intf->control;
+ control = connection->intf->control;
+
+ if (connection == control->connection)
+ return 0;
ret = gb_control_connected_operation(control, cport_id);
if (ret) {
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
- struct gb_protocol *protocol = connection->protocol;
struct gb_control *control;
u16 cport_id = connection->intf_cport_id;
int ret;
- if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
+ if (gb_connection_is_static(connection))
return;
- control = connection->bundle->intf->control;
+ control = connection->intf->control;
+
+ if (connection == control->connection)
+ return;
ret = gb_control_disconnected_operation(control, cport_id);
if (ret) {
}
}
-/*
- * Request protocol version supported by the module. We don't need to do
- * this for SVC as that is initiated by the SVC.
- */
-static int gb_connection_protocol_get_version(struct gb_connection *connection)
-{
- struct gb_protocol *protocol = connection->protocol;
- int ret;
-
- if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
- return 0;
-
- ret = gb_protocol_get_version(connection);
- if (ret) {
- dev_err(&connection->hd->dev,
- "%s: failed to get protocol version: %d\n",
- connection->name, ret);
- return ret;
- }
-
- return 0;
-}
-
/*
* Cancel all active operations on a connection.
*
*/
static void gb_connection_cancel_operations(struct gb_connection *connection,
int errno)
+ __must_hold(&connection->lock)
{
struct gb_operation *operation;
}
}
-int gb_connection_enable(struct gb_connection *connection,
- gb_request_handler_t handler)
+/*
+ * Cancel all active incoming operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to ENABLED_TX.
+ */
+static void
+gb_connection_flush_incoming_operations(struct gb_connection *connection,
+ int errno)
+ __must_hold(&connection->lock)
{
- int ret;
+ struct gb_operation *operation;
+ bool incoming;
- mutex_lock(&connection->mutex);
+ while (!list_empty(&connection->operations)) {
+ incoming = false;
+ list_for_each_entry(operation, &connection->operations,
+ links) {
+ if (gb_operation_is_incoming(operation)) {
+ gb_operation_get(operation);
+ incoming = true;
+ break;
+ }
+ }
+
+ if (!incoming)
+ break;
- if (connection->state == GB_CONNECTION_STATE_ENABLED)
- goto out_unlock;
+ spin_unlock_irq(&connection->lock);
+
+ /* FIXME: flush, not cancel? */
+ gb_operation_cancel_incoming(operation, errno);
+ gb_operation_put(operation);
+
+ spin_lock_irq(&connection->lock);
+ }
+}
+
+/*
+ * _gb_connection_enable() - enable a connection
+ * @connection: connection to enable
+ * @rx: whether to enable incoming requests
+ *
+ * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
+ * ENABLED_TX->ENABLED state transitions.
+ *
+ * Locking: Caller holds connection->mutex.
+ */
+static int _gb_connection_enable(struct gb_connection *connection, bool rx)
+{
+ int ret;
+ /* Handle ENABLED_TX -> ENABLED transitions. */
if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
- if (!handler)
- goto out_unlock;
+ if (!(connection->handler && rx))
+ return 0;
spin_lock_irq(&connection->lock);
- connection->handler = handler;
connection->state = GB_CONNECTION_STATE_ENABLED;
spin_unlock_irq(&connection->lock);
- goto out_unlock;
+ return 0;
}
ret = gb_connection_hd_cport_enable(connection);
if (ret)
- goto err_unlock;
+ return ret;
ret = gb_connection_svc_connection_create(connection);
if (ret)
goto err_hd_cport_disable;
spin_lock_irq(&connection->lock);
- connection->handler = handler;
- if (handler)
+ if (connection->handler && rx)
connection->state = GB_CONNECTION_STATE_ENABLED;
else
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
if (ret)
goto err_svc_destroy;
-out_unlock:
- mutex_unlock(&connection->mutex);
-
return 0;
err_svc_destroy:
spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISABLED;
gb_connection_cancel_operations(connection, -ESHUTDOWN);
- connection->handler = NULL;
spin_unlock_irq(&connection->lock);
gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
gb_connection_hd_cport_disable(connection);
-err_unlock:
- mutex_unlock(&connection->mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(gb_connection_enable);
-void gb_connection_disable(struct gb_connection *connection)
+int gb_connection_enable(struct gb_connection *connection)
{
+ int ret = 0;
+
mutex_lock(&connection->mutex);
- if (connection->state == GB_CONNECTION_STATE_DISABLED)
+ if (connection->state == GB_CONNECTION_STATE_ENABLED)
goto out_unlock;
- gb_connection_control_disconnected(connection);
-
- spin_lock_irq(&connection->lock);
- connection->state = GB_CONNECTION_STATE_DISABLED;
- gb_connection_cancel_operations(connection, -ESHUTDOWN);
- connection->handler = NULL;
- spin_unlock_irq(&connection->lock);
-
- gb_connection_svc_connection_destroy(connection);
- gb_connection_hd_cport_disable(connection);
-
+ ret = _gb_connection_enable(connection, true);
out_unlock:
mutex_unlock(&connection->mutex);
-}
-EXPORT_SYMBOL_GPL(gb_connection_disable);
-
-static int gb_legacy_request_handler(struct gb_operation *operation)
-{
- struct gb_protocol *protocol = operation->connection->protocol;
- return protocol->request_recv(operation->type, operation);
+ return ret;
}
+EXPORT_SYMBOL_GPL(gb_connection_enable);
-int gb_connection_legacy_init(struct gb_connection *connection)
+int gb_connection_enable_tx(struct gb_connection *connection)
{
- gb_request_handler_t handler;
- int ret;
+ int ret = 0;
- ret = gb_connection_bind_protocol(connection);
- if (ret)
- return ret;
+ mutex_lock(&connection->mutex);
- if (connection->protocol->request_recv)
- handler = gb_legacy_request_handler;
- else
- handler = NULL;
+ if (connection->state == GB_CONNECTION_STATE_ENABLED) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- ret = gb_connection_enable(connection, handler);
- if (ret)
- goto err_unbind_protocol;
+ if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
+ goto out_unlock;
- ret = gb_connection_protocol_get_version(connection);
- if (ret)
- goto err_disable;
+ ret = _gb_connection_enable(connection, false);
+out_unlock:
+ mutex_unlock(&connection->mutex);
- ret = connection->protocol->connection_init(connection);
- if (ret)
- goto err_disable;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
- return 0;
+void gb_connection_disable_rx(struct gb_connection *connection)
+{
+ mutex_lock(&connection->mutex);
-err_disable:
- gb_connection_disable(connection);
-err_unbind_protocol:
- gb_connection_unbind_protocol(connection);
+ spin_lock_irq(&connection->lock);
+ if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+ spin_unlock_irq(&connection->lock);
+ goto out_unlock;
+ }
+ connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+ gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
- return ret;
+out_unlock:
+ mutex_unlock(&connection->mutex);
}
-EXPORT_SYMBOL_GPL(gb_connection_legacy_init);
+EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
-void gb_connection_legacy_exit(struct gb_connection *connection)
+void gb_connection_disable(struct gb_connection *connection)
{
+ mutex_lock(&connection->mutex);
+
if (connection->state == GB_CONNECTION_STATE_DISABLED)
- return;
+ goto out_unlock;
- gb_connection_disable(connection);
+ gb_connection_control_disconnected(connection);
- connection->protocol->connection_exit(connection);
+ spin_lock_irq(&connection->lock);
+ connection->state = GB_CONNECTION_STATE_DISABLED;
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
- gb_connection_unbind_protocol(connection);
+ gb_connection_svc_connection_destroy(connection);
+ gb_connection_hd_cport_disable(connection);
+
+out_unlock:
+ mutex_unlock(&connection->mutex);
}
-EXPORT_SYMBOL_GPL(gb_connection_legacy_exit);
+EXPORT_SYMBOL_GPL(gb_connection_disable);
-/*
- * Tear down a previously set up connection.
- */
+/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
- struct ida *id_map;
-
- if (WARN_ON(!connection))
+ if (!connection)
return;
+ mutex_lock(&gb_connection_mutex);
+
spin_lock_irq(&gb_connections_lock);
list_del(&connection->bundle_links);
list_del(&connection->hd_links);
spin_unlock_irq(&gb_connections_lock);
- id_map = &connection->hd->cport_id_map;
- ida_simple_remove(id_map, connection->hd_cport_id);
+ destroy_workqueue(connection->wq);
+
+ gb_hd_cport_release(connection->hd, connection->hd_cport_id);
connection->hd_cport_id = CPORT_ID_BAD;
- kref_put_mutex(&connection->kref, gb_connection_kref_release,
- &connection_mutex);
+ mutex_unlock(&gb_connection_mutex);
+
+ gb_connection_put(connection);
}
+EXPORT_SYMBOL_GPL(gb_connection_destroy);
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
-
-static int gb_connection_bind_protocol(struct gb_connection *connection)
-{
- struct gb_protocol *protocol;
-
- protocol = gb_protocol_get(connection->protocol_id,
- connection->major,
- connection->minor);
- if (!protocol) {
- dev_err(&connection->hd->dev,
- "protocol 0x%02x version %u.%u not found\n",
- connection->protocol_id,
- connection->major, connection->minor);
- return -EPROTONOSUPPORT;
- }
- connection->protocol = protocol;
-
- return 0;
-}
-
-static void gb_connection_unbind_protocol(struct gb_connection *connection)
-{
- struct gb_protocol *protocol = connection->protocol;
-
- gb_protocol_put(protocol);
-
- connection->protocol = NULL;
-}