/*
 * Greybus connection code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/workqueue.h>
14 static DEFINE_SPINLOCK(gb_connections_lock);
16 /* This is only used at initialization time; no locking is required. */
17 static struct gb_connection *
18 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
20 struct gb_host_device *hd = intf->hd;
21 struct gb_connection *connection;
23 list_for_each_entry(connection, &hd->connections, hd_links)
24 if (connection->bundle->intf == intf &&
25 connection->intf_cport_id == cport_id)
30 static struct gb_connection *
31 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
33 struct gb_connection *connection;
36 spin_lock_irqsave(&gb_connections_lock, flags);
37 list_for_each_entry(connection, &hd->connections, hd_links)
38 if (connection->hd_cport_id == cport_id)
42 spin_unlock_irqrestore(&gb_connections_lock, flags);
48 * Callback from the host driver to let us know that data has been
49 * received on the bundle.
51 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
52 u8 *data, size_t length)
54 struct gb_connection *connection;
56 connection = gb_connection_hd_find(hd, cport_id);
59 "nonexistent connection (%zu bytes dropped)\n", length);
62 gb_connection_recv(connection, data, length);
64 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
66 static DEFINE_MUTEX(connection_mutex);
68 static void gb_connection_kref_release(struct kref *kref)
70 struct gb_connection *connection;
72 connection = container_of(kref, struct gb_connection, kref);
73 destroy_workqueue(connection->wq);
75 mutex_unlock(&connection_mutex);
78 int svc_update_connection(struct gb_interface *intf,
79 struct gb_connection *connection)
81 struct gb_bundle *bundle;
83 bundle = gb_bundle_create(intf, GB_SVC_BUNDLE_ID, GREYBUS_CLASS_SVC);
87 connection->bundle = bundle;
89 spin_lock_irq(&gb_connections_lock);
90 list_add(&connection->bundle_links, &bundle->connections);
91 spin_unlock_irq(&gb_connections_lock);
97 * Set up a Greybus connection, representing the bidirectional link
98 * between a CPort on a (local) Greybus host device and a CPort on
99 * another Greybus module.
101 * A connection also maintains the state of operations sent over the
104 * Returns a pointer to the new connection if successful, or a null
107 struct gb_connection *
108 gb_connection_create_range(struct gb_host_device *hd,
109 struct gb_bundle *bundle, struct device *parent,
110 u16 cport_id, u8 protocol_id, u32 ida_start,
113 struct gb_connection *connection;
114 struct ida *id_map = &hd->cport_id_map;
121 * If a manifest tries to reuse a cport, reject it. We
122 * initialize connections serially so we don't need to worry
123 * about holding the connection lock.
125 if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
126 dev_err(&bundle->dev, "cport 0x%04hx already connected\n",
131 hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
135 connection = kzalloc(sizeof(*connection), GFP_KERNEL);
139 connection->hd_cport_id = hd_cport_id;
140 connection->intf_cport_id = cport_id;
143 connection->protocol_id = protocol_id;
144 connection->major = major;
145 connection->minor = minor;
147 connection->bundle = bundle;
148 connection->state = GB_CONNECTION_STATE_DISABLED;
150 atomic_set(&connection->op_cycle, 0);
151 spin_lock_init(&connection->lock);
152 INIT_LIST_HEAD(&connection->operations);
154 connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
155 dev_name(parent), cport_id);
157 goto err_free_connection;
159 kref_init(&connection->kref);
161 spin_lock_irq(&gb_connections_lock);
162 list_add(&connection->hd_links, &hd->connections);
165 list_add(&connection->bundle_links, &bundle->connections);
167 INIT_LIST_HEAD(&connection->bundle_links);
169 spin_unlock_irq(&gb_connections_lock);
171 retval = gb_connection_bind_protocol(connection);
173 dev_err(parent, "%d: failed to bind protocol: %d\n",
175 gb_connection_destroy(connection);
184 ida_simple_remove(id_map, hd_cport_id);
189 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
191 struct gb_host_device *hd = connection->hd;
194 if (!hd->driver->cport_enable)
197 ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
200 "failed to enable host cport: %d\n", ret);
207 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
209 struct gb_host_device *hd = connection->hd;
211 if (!hd->driver->cport_disable)
214 hd->driver->cport_disable(hd, connection->hd_cport_id);
217 struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
218 u16 cport_id, u8 protocol_id)
220 return gb_connection_create_range(bundle->intf->hd, bundle,
221 &bundle->dev, cport_id, protocol_id,
222 0, bundle->intf->hd->num_cports);
226 * Cancel all active operations on a connection.
228 * Should only be called during connection tear down.
230 static void gb_connection_cancel_operations(struct gb_connection *connection,
233 struct gb_operation *operation;
235 spin_lock_irq(&connection->lock);
236 while (!list_empty(&connection->operations)) {
237 operation = list_last_entry(&connection->operations,
238 struct gb_operation, links);
239 gb_operation_get(operation);
240 spin_unlock_irq(&connection->lock);
242 if (gb_operation_is_incoming(operation))
243 gb_operation_cancel_incoming(operation, errno);
245 gb_operation_cancel(operation, errno);
247 gb_operation_put(operation);
249 spin_lock_irq(&connection->lock);
251 spin_unlock_irq(&connection->lock);
255 * Request the SVC to create a connection from AP's cport to interface's
259 gb_connection_svc_connection_create(struct gb_connection *connection)
261 struct gb_host_device *hd = connection->hd;
262 struct gb_protocol *protocol = connection->protocol;
263 struct gb_interface *intf;
266 if (protocol->flags & GB_PROTOCOL_SKIP_SVC_CONNECTION)
269 intf = connection->bundle->intf;
270 ret = gb_svc_connection_create(hd->svc,
271 hd->endo->ap_intf_id,
272 connection->hd_cport_id,
274 connection->intf_cport_id,
275 intf->boot_over_unipro);
277 dev_err(&connection->bundle->dev,
278 "failed to create svc connection: %d\n", ret);
286 gb_connection_svc_connection_destroy(struct gb_connection *connection)
288 if (connection->protocol->flags & GB_PROTOCOL_SKIP_SVC_CONNECTION)
291 gb_svc_connection_destroy(connection->hd->svc,
292 connection->hd->endo->ap_intf_id,
293 connection->hd_cport_id,
294 connection->bundle->intf->interface_id,
295 connection->intf_cport_id);
298 /* Inform Interface about active CPorts */
299 static int gb_connection_control_connected(struct gb_connection *connection)
301 struct gb_protocol *protocol = connection->protocol;
302 struct gb_control *control;
303 u16 cport_id = connection->intf_cport_id;
306 if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
309 control = connection->bundle->intf->control;
311 ret = gb_control_connected_operation(control, cport_id);
313 dev_err(&connection->bundle->dev,
314 "failed to connect cport: %d\n", ret);
321 /* Inform Interface about inactive CPorts */
323 gb_connection_control_disconnected(struct gb_connection *connection)
325 struct gb_protocol *protocol = connection->protocol;
326 struct gb_control *control;
327 u16 cport_id = connection->intf_cport_id;
330 if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
333 control = connection->bundle->intf->control;
335 ret = gb_control_disconnected_operation(control, cport_id);
337 dev_warn(&connection->bundle->dev,
338 "failed to disconnect cport: %d\n", ret);
343 * Request protocol version supported by the module. We don't need to do
344 * this for SVC as that is initiated by the SVC.
346 static int gb_connection_protocol_get_version(struct gb_connection *connection)
348 struct gb_protocol *protocol = connection->protocol;
351 if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
354 ret = gb_protocol_get_version(connection);
356 dev_err(&connection->bundle->dev,
357 "failed to get protocol version: %d\n", ret);
364 static int gb_connection_init(struct gb_connection *connection)
366 struct gb_protocol *protocol = connection->protocol;
369 ret = gb_connection_hd_cport_enable(connection);
373 ret = gb_connection_svc_connection_create(connection);
375 goto err_hd_cport_disable;
377 ret = gb_connection_control_connected(connection);
379 goto err_svc_destroy;
381 /* Need to enable the connection to initialize it */
382 spin_lock_irq(&connection->lock);
383 connection->state = GB_CONNECTION_STATE_ENABLED;
384 spin_unlock_irq(&connection->lock);
386 ret = gb_connection_protocol_get_version(connection);
390 ret = protocol->connection_init(connection);
397 spin_lock_irq(&connection->lock);
398 connection->state = GB_CONNECTION_STATE_ERROR;
399 spin_unlock_irq(&connection->lock);
401 gb_connection_control_disconnected(connection);
403 gb_connection_svc_connection_destroy(connection);
404 err_hd_cport_disable:
405 gb_connection_hd_cport_disable(connection);
410 static void gb_connection_exit(struct gb_connection *connection)
412 if (!connection->protocol)
415 spin_lock_irq(&connection->lock);
416 if (connection->state != GB_CONNECTION_STATE_ENABLED) {
417 spin_unlock_irq(&connection->lock);
420 connection->state = GB_CONNECTION_STATE_DESTROYING;
421 spin_unlock_irq(&connection->lock);
423 gb_connection_cancel_operations(connection, -ESHUTDOWN);
425 connection->protocol->connection_exit(connection);
426 gb_connection_control_disconnected(connection);
427 gb_connection_svc_connection_destroy(connection);
428 gb_connection_hd_cport_disable(connection);
432 * Tear down a previously set up connection.
434 void gb_connection_destroy(struct gb_connection *connection)
438 if (WARN_ON(!connection))
441 gb_connection_exit(connection);
443 spin_lock_irq(&gb_connections_lock);
444 list_del(&connection->bundle_links);
445 list_del(&connection->hd_links);
446 spin_unlock_irq(&gb_connections_lock);
448 if (connection->protocol)
449 gb_protocol_put(connection->protocol);
450 connection->protocol = NULL;
452 id_map = &connection->hd->cport_id_map;
453 ida_simple_remove(id_map, connection->hd_cport_id);
454 connection->hd_cport_id = CPORT_ID_BAD;
456 kref_put_mutex(&connection->kref, gb_connection_kref_release,
460 void gb_connection_latency_tag_enable(struct gb_connection *connection)
462 struct gb_host_device *hd = connection->hd;
465 if (!hd->driver->latency_tag_enable)
468 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
470 dev_err(&connection->bundle->dev,
471 "failed to enable latency tag: %d\n", ret);
474 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
476 void gb_connection_latency_tag_disable(struct gb_connection *connection)
478 struct gb_host_device *hd = connection->hd;
481 if (!hd->driver->latency_tag_disable)
484 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
486 dev_err(&connection->bundle->dev,
487 "failed to disable latency tag: %d\n", ret);
490 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
492 int gb_connection_bind_protocol(struct gb_connection *connection)
494 struct gb_protocol *protocol;
497 /* If we already have a protocol bound here, just return */
498 if (connection->protocol)
501 protocol = gb_protocol_get(connection->protocol_id,
505 dev_warn(&connection->hd->dev,
506 "protocol 0x%02hhx version %hhu.%hhu not found\n",
507 connection->protocol_id,
508 connection->major, connection->minor);
511 connection->protocol = protocol;
514 * If we have a valid device_id for the interface block, then we have an
515 * active device, so bring up the connection at the same time.
517 if ((!connection->bundle &&
518 protocol->flags & GB_PROTOCOL_NO_BUNDLE) ||
519 connection->bundle->intf->device_id != GB_DEVICE_ID_BAD) {
520 ret = gb_connection_init(connection);
522 gb_protocol_put(protocol);
523 connection->protocol = NULL;