4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
7 * Released under the GPLv2 only.
10 #include <linux/workqueue.h>
/* Forward declarations: protocol bind/unbind helpers defined at end of file. */
15 static int gb_connection_bind_protocol(struct gb_connection *connection);
16 static void gb_connection_unbind_protocol(struct gb_connection *connection);
/* Protects the host device's connection list (hd->connections). */
19 static DEFINE_SPINLOCK(gb_connections_lock);
21 /* This is only used at initialization time; no locking is required. */
/*
 * Find the connection on @intf whose interface-side CPort id equals
 * @cport_id, by walking the host device's connection list and matching
 * on both owning interface and interface CPort id.
 * NOTE(review): the return path is elided in this excerpt; presumably
 * returns the matching connection or NULL — confirm against full source.
 */
22 static struct gb_connection *
23 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
25 struct gb_host_device *hd = intf->hd;
26 struct gb_connection *connection;
28 list_for_each_entry(connection, &hd->connections, hd_links) {
29 if (connection->intf == intf &&
30 connection->intf_cport_id == cport_id)
/*
 * Look up a connection by host-device CPort id. Takes
 * gb_connections_lock with interrupts saved/disabled, since this is
 * called from the data-receive path (greybus_data_rcvd()).
 */
37 static struct gb_connection *
38 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
40 struct gb_connection *connection;
/* NOTE(review): the 'flags' declaration is on a line elided from this view. */
43 spin_lock_irqsave(&gb_connections_lock, flags);
44 list_for_each_entry(connection, &hd->connections, hd_links)
45 if (connection->hd_cport_id == cport_id)
49 spin_unlock_irqrestore(&gb_connections_lock, flags);
55 * Callback from the host driver to let us know that data has been
56 * received on the bundle.
/*
 * Maps the host CPort id to a connection and hands the payload to the
 * operation layer via gb_connection_recv().
 */
58 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
59 u8 *data, size_t length)
61 struct gb_connection *connection;
63 connection = gb_connection_hd_find(hd, cport_id);
/* No connection registered for this CPort: payload is dropped with a log. */
66 "nonexistent connection (%zu bytes dropped)\n", length);
69 gb_connection_recv(connection, data, length);
71 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
/* Serializes final release against gb_connection_destroy() (kref_put_mutex). */
73 static DEFINE_MUTEX(connection_mutex);
/*
 * Last-reference release for a connection: destroys the per-connection
 * workqueue, then drops connection_mutex, which kref_put_mutex() in
 * gb_connection_destroy() acquired on our behalf.
 * NOTE(review): the kfree of the connection itself is on a line elided
 * from this excerpt — confirm against full source.
 */
75 static void gb_connection_kref_release(struct kref *kref)
77 struct gb_connection *connection;
79 connection = container_of(kref, struct gb_connection, kref);
80 destroy_workqueue(connection->wq);
/* Pairs with the kref_put_mutex() call in gb_connection_destroy(). */
82 mutex_unlock(&connection_mutex);
/*
 * Build the human-readable connection name "<hd_cport>/<intf>:<cport>".
 * Static connections have no interface; intf_id/cport_id then keep the
 * defaults presumably assigned on lines elided from this view — confirm.
 */
85 static void gb_connection_init_name(struct gb_connection *connection)
87 u16 hd_cport_id = connection->hd_cport_id;
91 if (connection->intf) {
92 intf_id = connection->intf->interface_id;
93 cport_id = connection->intf_cport_id;
96 snprintf(connection->name, sizeof(connection->name),
97 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
101 * gb_connection_create() - create a Greybus connection
102 * @hd: host device of the connection
103 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
104 * @intf: remote interface, or NULL for static connections
105 * @bundle: remote-interface bundle (may be NULL)
106 * @cport_id: remote-interface cport id, or 0 for static connections
107 * @protocol_id: protocol id
109 * Create a Greybus connection, representing the bidirectional link
110 * between a CPort on a (local) Greybus host device and a CPort on
111 * another Greybus interface.
113 * A connection also maintains the state of operations sent over the
116 * Return: A pointer to the new connection if successful, or NULL otherwise.
118 static struct gb_connection *
119 gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
120 struct gb_interface *intf,
121 struct gb_bundle *bundle, int cport_id,
124 struct gb_connection *connection;
125 struct ida *id_map = &hd->cport_id_map;
126 int ida_start, ida_end;
131 * If a manifest tries to reuse a cport, reject it. We
132 * initialize connections serially so we don't need to worry
133 * about holding the connection lock.
135 if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
136 dev_err(&bundle->dev, "cport 0x%04x already connected\n",
/*
 * Choose the IDA search window for the host CPort id: any free id
 * when the caller passed -1, otherwise exactly the requested id.
 */
141 if (hd_cport_id < 0) {
143 ida_end = hd->num_cports;
144 } else if (hd_cport_id < hd->num_cports) {
145 ida_start = hd_cport_id;
146 ida_end = hd_cport_id + 1;
/* Requested id is out of range for this host device. */
148 dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
/* Reserve the host CPort id in the host device's id map. */
152 hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
156 connection = kzalloc(sizeof(*connection), GFP_KERNEL);
160 connection->hd_cport_id = hd_cport_id;
161 connection->intf_cport_id = cport_id;
163 connection->intf = intf;
165 connection->protocol_id = protocol_id;
166 connection->major = major;
167 connection->minor = minor;
169 connection->bundle = bundle;
/* New connections start disabled; gb_connection_init() enables them. */
170 connection->state = GB_CONNECTION_STATE_DISABLED;
/* Operation tracking: cycle counter, lock, and in-flight operation list. */
172 atomic_set(&connection->op_cycle, 0);
173 spin_lock_init(&connection->lock);
174 INIT_LIST_HEAD(&connection->operations);
/* Ordered (max_active = 1), unbound workqueue for this connection's work. */
176 connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
177 dev_name(&hd->dev), hd_cport_id);
179 goto err_free_connection;
181 kref_init(&connection->kref);
183 gb_connection_init_name(connection);
/* Publish the connection on the host device's (and bundle's) lists. */
185 spin_lock_irq(&gb_connections_lock);
186 list_add(&connection->hd_links, &hd->connections);
189 list_add(&connection->bundle_links, &bundle->connections);
/* No bundle (static connection): keep bundle_links a valid empty head. */
191 INIT_LIST_HEAD(&connection->bundle_links);
193 spin_unlock_irq(&gb_connections_lock);
/* Error unwind: give back the reserved host CPort id. */
200 ida_simple_remove(id_map, hd_cport_id);
/*
 * Create a static connection: fixed host CPort id, no remote interface
 * or bundle (intf and bundle are NULL, remote cport id 0).
 */
205 struct gb_connection *
206 gb_connection_create_static(struct gb_host_device *hd,
207 u16 hd_cport_id, u8 protocol_id)
209 return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
/*
 * Create a dynamic connection: the host CPort id is allocated from the
 * host device's id map (-1), bound to a remote interface/bundle CPort.
 */
213 struct gb_connection *
214 gb_connection_create_dynamic(struct gb_interface *intf,
215 struct gb_bundle *bundle,
216 u16 cport_id, u8 protocol_id)
218 return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
/*
 * Enable the host-side CPort through the host driver's optional
 * cport_enable hook; logs the error code on failure.
 */
222 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
224 struct gb_host_device *hd = connection->hd;
/* Hook is optional; nothing to do if the driver doesn't provide it. */
227 if (!hd->driver->cport_enable)
230 ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
233 "failed to enable host cport: %d\n", ret);
/*
 * Disable the host-side CPort through the host driver's optional
 * cport_disable hook; mirror of gb_connection_hd_cport_enable().
 */
240 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
242 struct gb_host_device *hd = connection->hd;
/* Hook is optional; nothing to do if the driver doesn't provide it. */
244 if (!hd->driver->cport_disable)
247 hd->driver->cport_disable(hd, connection->hd_cport_id);
251 * Cancel all active operations on a connection.
253 * Should only be called during connection tear down.
255 static void gb_connection_cancel_operations(struct gb_connection *connection,
258 struct gb_operation *operation;
260 spin_lock_irq(&connection->lock);
/* Drain from the tail until the in-flight list is empty. */
261 while (!list_empty(&connection->operations)) {
262 operation = list_last_entry(&connection->operations,
263 struct gb_operation, links);
/* Take a reference and drop the lock: cancellation can sleep. */
264 gb_operation_get(operation);
265 spin_unlock_irq(&connection->lock);
/* Incoming and outgoing operations use distinct cancel paths. */
267 if (gb_operation_is_incoming(operation))
268 gb_operation_cancel_incoming(operation, errno);
270 gb_operation_cancel(operation, errno);
272 gb_operation_put(operation);
/* Re-take the lock before re-checking the list. */
274 spin_lock_irq(&connection->lock);
276 spin_unlock_irq(&connection->lock);
280 * Request the SVC to create a connection from AP's cport to interface's
284 gb_connection_svc_connection_create(struct gb_connection *connection)
286 struct gb_host_device *hd = connection->hd;
287 struct gb_interface *intf;
/* Static connections have no remote interface; nothing for the SVC to do. */
290 if (gb_connection_is_static(connection))
293 intf = connection->intf;
/* NOTE(review): some arguments (e.g. AP interface id) are on elided lines. */
294 ret = gb_svc_connection_create(hd->svc,
296 connection->hd_cport_id,
298 connection->intf_cport_id,
299 intf->boot_over_unipro);
301 dev_err(&connection->hd->dev,
302 "%s: failed to create svc connection: %d\n",
303 connection->name, ret);
/*
 * Ask the SVC to tear down the route between the AP's host CPort and
 * the remote interface CPort; inverse of
 * gb_connection_svc_connection_create(). No-op for static connections.
 */
311 gb_connection_svc_connection_destroy(struct gb_connection *connection)
313 if (gb_connection_is_static(connection))
316 gb_svc_connection_destroy(connection->hd->svc,
317 connection->hd->svc->ap_intf_id,
318 connection->hd_cport_id,
319 connection->intf->interface_id,
320 connection->intf_cport_id);
323 /* Inform Interface about active CPorts */
324 static int gb_connection_control_connected(struct gb_connection *connection)
326 struct gb_protocol *protocol = connection->protocol;
327 struct gb_control *control;
328 u16 cport_id = connection->intf_cport_id;
/* Some protocols opt out of the "connected" control notification. */
331 if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
334 control = connection->bundle->intf->control;
/* Send the Control-protocol "connected" operation for this CPort. */
336 ret = gb_control_connected_operation(control, cport_id);
338 dev_err(&connection->bundle->dev,
339 "failed to connect cport: %d\n", ret);
346 /* Inform Interface about inactive CPorts */
348 gb_connection_control_disconnected(struct gb_connection *connection)
350 struct gb_protocol *protocol = connection->protocol;
351 struct gb_control *control;
352 u16 cport_id = connection->intf_cport_id;
/* Some protocols opt out of the "disconnected" control notification. */
355 if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
358 control = connection->bundle->intf->control;
360 ret = gb_control_disconnected_operation(control, cport_id);
/* Best-effort on teardown: warn (not error) and carry on. */
362 dev_warn(&connection->bundle->dev,
363 "failed to disconnect cport: %d\n", ret);
368 * Request protocol version supported by the module. We don't need to do
369 * this for SVC as that is initiated by the SVC.
371 static int gb_connection_protocol_get_version(struct gb_connection *connection)
373 struct gb_protocol *protocol = connection->protocol;
/* Protocols that negotiate versions elsewhere set SKIP_VERSION. */
376 if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
379 ret = gb_protocol_get_version(connection);
381 dev_err(&connection->bundle->dev,
382 "failed to get protocol version: %d\n", ret);
/*
 * Bring a connection up: bind its protocol, enable the host CPort,
 * create the SVC route, notify the remote interface, then negotiate the
 * protocol version and run the protocol's connection_init hook.
 * Each failure unwinds through the labels below in reverse order.
 */
389 int gb_connection_init(struct gb_connection *connection)
393 ret = gb_connection_bind_protocol(connection);
397 ret = gb_connection_hd_cport_enable(connection);
399 goto err_unbind_protocol;
401 ret = gb_connection_svc_connection_create(connection);
403 goto err_hd_cport_disable;
405 ret = gb_connection_control_connected(connection);
407 goto err_svc_destroy;
409 /* Need to enable the connection to initialize it */
410 spin_lock_irq(&connection->lock);
411 connection->state = GB_CONNECTION_STATE_ENABLED;
412 spin_unlock_irq(&connection->lock);
414 ret = gb_connection_protocol_get_version(connection);
418 ret = connection->protocol->connection_init(connection);
/* Late failure: mark the connection errored before tearing down. */
425 spin_lock_irq(&connection->lock);
426 connection->state = GB_CONNECTION_STATE_ERROR;
427 spin_unlock_irq(&connection->lock);
429 gb_connection_control_disconnected(connection);
431 gb_connection_svc_connection_destroy(connection);
432 err_hd_cport_disable:
433 gb_connection_hd_cport_disable(connection);
435 gb_connection_unbind_protocol(connection);
/*
 * Shut a connection down: only ENABLED connections are torn down; the
 * state moves to DESTROYING under the lock so no new operations start,
 * in-flight operations are cancelled with -ESHUTDOWN, then the
 * gb_connection_init() steps are undone in reverse order.
 */
440 static void gb_connection_exit(struct gb_connection *connection)
442 spin_lock_irq(&connection->lock);
/* Not enabled (never initialized, or already errored): nothing to undo. */
443 if (connection->state != GB_CONNECTION_STATE_ENABLED) {
444 spin_unlock_irq(&connection->lock);
447 connection->state = GB_CONNECTION_STATE_DESTROYING;
448 spin_unlock_irq(&connection->lock);
450 gb_connection_cancel_operations(connection, -ESHUTDOWN);
452 connection->protocol->connection_exit(connection);
453 gb_connection_control_disconnected(connection);
454 gb_connection_svc_connection_destroy(connection);
455 gb_connection_hd_cport_disable(connection);
456 gb_connection_unbind_protocol(connection);
460 * Tear down a previously set up connection.
462 void gb_connection_destroy(struct gb_connection *connection)
466 if (WARN_ON(!connection))
469 gb_connection_exit(connection);
/* Unlink from bundle and host-device lists before freeing resources. */
471 spin_lock_irq(&gb_connections_lock);
472 list_del(&connection->bundle_links);
473 list_del(&connection->hd_links);
474 spin_unlock_irq(&gb_connections_lock);
/* Return the host CPort id and poison the field against reuse. */
476 id_map = &connection->hd->cport_id_map;
477 ida_simple_remove(id_map, connection->hd_cport_id);
478 connection->hd_cport_id = CPORT_ID_BAD;
/* Drop our reference; release runs under connection_mutex (see kref_release). */
480 kref_put_mutex(&connection->kref, gb_connection_kref_release,
/*
 * Enable latency tagging on this connection's host CPort via the host
 * driver's optional latency_tag_enable hook; failures are only logged.
 */
484 void gb_connection_latency_tag_enable(struct gb_connection *connection)
486 struct gb_host_device *hd = connection->hd;
/* Hook is optional; silently skip if the driver doesn't provide it. */
489 if (!hd->driver->latency_tag_enable)
492 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
494 dev_err(&connection->hd->dev,
495 "%s: failed to enable latency tag: %d\n",
496 connection->name, ret);
499 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
/*
 * Disable latency tagging on this connection's host CPort; mirror of
 * gb_connection_latency_tag_enable(), failures are only logged.
 */
501 void gb_connection_latency_tag_disable(struct gb_connection *connection)
503 struct gb_host_device *hd = connection->hd;
/* Hook is optional; silently skip if the driver doesn't provide it. */
506 if (!hd->driver->latency_tag_disable)
509 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
511 dev_err(&connection->hd->dev,
512 "%s: failed to disable latency tag: %d\n",
513 connection->name, ret);
516 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
/*
 * Resolve and take a reference on the protocol implementation matching
 * this connection's protocol id (and, per the error message, its
 * major/minor version), storing it in connection->protocol.
 * Returns -EPROTONOSUPPORT when no matching protocol is registered.
 */
518 static int gb_connection_bind_protocol(struct gb_connection *connection)
520 struct gb_protocol *protocol;
/* NOTE(review): additional gb_protocol_get() arguments are on elided lines. */
522 protocol = gb_protocol_get(connection->protocol_id,
526 dev_err(&connection->hd->dev,
527 "protocol 0x%02x version %u.%u not found\n",
528 connection->protocol_id,
529 connection->major, connection->minor);
530 return -EPROTONOSUPPORT;
532 connection->protocol = protocol;
/*
 * Drop the protocol reference taken by gb_connection_bind_protocol()
 * and clear the connection's protocol pointer.
 */
537 static void gb_connection_unbind_protocol(struct gb_connection *connection)
539 struct gb_protocol *protocol = connection->protocol;
541 gb_protocol_put(protocol);
543 connection->protocol = NULL;