/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
10 #include <linux/workqueue.h>
15 static DEFINE_SPINLOCK(gb_connections_lock);
17 /* This is only used at initialization time; no locking is required. */
18 static struct gb_connection *
19 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
21 struct gb_host_device *hd = intf->hd;
22 struct gb_connection *connection;
24 list_for_each_entry(connection, &hd->connections, hd_links) {
25 if (connection->intf == intf &&
26 connection->intf_cport_id == cport_id)
33 static struct gb_connection *
34 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
36 struct gb_connection *connection;
39 spin_lock_irqsave(&gb_connections_lock, flags);
40 list_for_each_entry(connection, &hd->connections, hd_links)
41 if (connection->hd_cport_id == cport_id)
45 spin_unlock_irqrestore(&gb_connections_lock, flags);
51 * Callback from the host driver to let us know that data has been
52 * received on the bundle.
54 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
55 u8 *data, size_t length)
57 struct gb_connection *connection;
59 connection = gb_connection_hd_find(hd, cport_id);
62 "nonexistent connection (%zu bytes dropped)\n", length);
65 gb_connection_recv(connection, data, length);
67 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
69 static DEFINE_MUTEX(connection_mutex);
71 static void gb_connection_kref_release(struct kref *kref)
73 struct gb_connection *connection;
75 connection = container_of(kref, struct gb_connection, kref);
76 destroy_workqueue(connection->wq);
78 mutex_unlock(&connection_mutex);
81 static void gb_connection_init_name(struct gb_connection *connection)
83 u16 hd_cport_id = connection->hd_cport_id;
87 if (connection->intf) {
88 intf_id = connection->intf->interface_id;
89 cport_id = connection->intf_cport_id;
92 snprintf(connection->name, sizeof(connection->name),
93 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
97 * gb_connection_create() - create a Greybus connection
98 * @hd: host device of the connection
99 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
100 * @intf: remote interface, or NULL for static connections
101 * @bundle: remote-interface bundle (may be NULL)
102 * @cport_id: remote-interface cport id, or 0 for static connections
103 * @protocol_id: protocol id
105 * Create a Greybus connection, representing the bidirectional link
106 * between a CPort on a (local) Greybus host device and a CPort on
107 * another Greybus interface.
109 * A connection also maintains the state of operations sent over the
112 * Return: A pointer to the new connection if successful, or NULL otherwise.
114 static struct gb_connection *
115 gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
116 struct gb_interface *intf,
117 struct gb_bundle *bundle, int cport_id,
120 struct gb_connection *connection;
121 struct ida *id_map = &hd->cport_id_map;
122 int ida_start, ida_end;
125 * If a manifest tries to reuse a cport, reject it. We
126 * initialize connections serially so we don't need to worry
127 * about holding the connection lock.
129 if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
130 dev_err(&bundle->dev, "cport %u already connected\n",
135 if (hd_cport_id < 0) {
137 ida_end = hd->num_cports;
138 } else if (hd_cport_id < hd->num_cports) {
139 ida_start = hd_cport_id;
140 ida_end = hd_cport_id + 1;
142 dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
146 hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
150 connection = kzalloc(sizeof(*connection), GFP_KERNEL);
154 connection->hd_cport_id = hd_cport_id;
155 connection->intf_cport_id = cport_id;
157 connection->intf = intf;
159 connection->protocol_id = protocol_id;
161 connection->bundle = bundle;
162 connection->state = GB_CONNECTION_STATE_DISABLED;
164 atomic_set(&connection->op_cycle, 0);
165 mutex_init(&connection->mutex);
166 spin_lock_init(&connection->lock);
167 INIT_LIST_HEAD(&connection->operations);
169 connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
170 dev_name(&hd->dev), hd_cport_id);
172 goto err_free_connection;
174 kref_init(&connection->kref);
176 gb_connection_init_name(connection);
178 spin_lock_irq(&gb_connections_lock);
179 list_add(&connection->hd_links, &hd->connections);
182 list_add(&connection->bundle_links, &bundle->connections);
184 INIT_LIST_HEAD(&connection->bundle_links);
186 spin_unlock_irq(&gb_connections_lock);
193 ida_simple_remove(id_map, hd_cport_id);
198 struct gb_connection *
199 gb_connection_create_static(struct gb_host_device *hd,
200 u16 hd_cport_id, u8 protocol_id)
202 return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
206 struct gb_connection *
207 gb_connection_create_dynamic(struct gb_interface *intf,
208 struct gb_bundle *bundle,
209 u16 cport_id, u8 protocol_id)
211 return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
215 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
217 struct gb_host_device *hd = connection->hd;
220 if (!hd->driver->cport_enable)
223 ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
226 "failed to enable host cport: %d\n", ret);
233 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
235 struct gb_host_device *hd = connection->hd;
237 if (!hd->driver->cport_disable)
240 hd->driver->cport_disable(hd, connection->hd_cport_id);
244 * Request the SVC to create a connection from AP's cport to interface's
248 gb_connection_svc_connection_create(struct gb_connection *connection)
250 struct gb_host_device *hd = connection->hd;
251 struct gb_interface *intf;
254 if (gb_connection_is_static(connection))
257 intf = connection->intf;
258 ret = gb_svc_connection_create(hd->svc,
260 connection->hd_cport_id,
262 connection->intf_cport_id,
263 intf->boot_over_unipro);
265 dev_err(&connection->hd->dev,
266 "%s: failed to create svc connection: %d\n",
267 connection->name, ret);
275 gb_connection_svc_connection_destroy(struct gb_connection *connection)
277 if (gb_connection_is_static(connection))
280 gb_svc_connection_destroy(connection->hd->svc,
281 connection->hd->svc->ap_intf_id,
282 connection->hd_cport_id,
283 connection->intf->interface_id,
284 connection->intf_cport_id);
287 /* Inform Interface about active CPorts */
288 static int gb_connection_control_connected(struct gb_connection *connection)
290 struct gb_control *control;
291 u16 cport_id = connection->intf_cport_id;
294 if (gb_connection_is_static(connection))
297 control = connection->intf->control;
299 if (connection == control->connection)
302 ret = gb_control_connected_operation(control, cport_id);
304 dev_err(&connection->bundle->dev,
305 "failed to connect cport: %d\n", ret);
312 /* Inform Interface about inactive CPorts */
314 gb_connection_control_disconnected(struct gb_connection *connection)
316 struct gb_control *control;
317 u16 cport_id = connection->intf_cport_id;
320 if (gb_connection_is_static(connection))
323 control = connection->intf->control;
325 if (connection == control->connection)
328 ret = gb_control_disconnected_operation(control, cport_id);
330 dev_warn(&connection->bundle->dev,
331 "failed to disconnect cport: %d\n", ret);
336 * Cancel all active operations on a connection.
338 * Locking: Called with connection lock held and state set to DISABLED.
340 static void gb_connection_cancel_operations(struct gb_connection *connection,
343 struct gb_operation *operation;
345 while (!list_empty(&connection->operations)) {
346 operation = list_last_entry(&connection->operations,
347 struct gb_operation, links);
348 gb_operation_get(operation);
349 spin_unlock_irq(&connection->lock);
351 if (gb_operation_is_incoming(operation))
352 gb_operation_cancel_incoming(operation, errno);
354 gb_operation_cancel(operation, errno);
356 gb_operation_put(operation);
358 spin_lock_irq(&connection->lock);
363 * Cancel all active incoming operations on a connection.
365 * Locking: Called with connection lock held and state set to ENABLED_TX.
368 gb_connection_flush_incoming_operations(struct gb_connection *connection,
371 struct gb_operation *operation;
374 while (!list_empty(&connection->operations)) {
376 list_for_each_entry(operation, &connection->operations,
378 if (gb_operation_is_incoming(operation)) {
379 gb_operation_get(operation);
388 spin_unlock_irq(&connection->lock);
390 /* FIXME: flush, not cancel? */
391 gb_operation_cancel_incoming(operation, errno);
392 gb_operation_put(operation);
394 spin_lock_irq(&connection->lock);
398 int gb_connection_enable(struct gb_connection *connection,
399 gb_request_handler_t handler)
403 mutex_lock(&connection->mutex);
405 if (connection->state == GB_CONNECTION_STATE_ENABLED)
408 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
412 spin_lock_irq(&connection->lock);
413 connection->handler = handler;
414 connection->state = GB_CONNECTION_STATE_ENABLED;
415 spin_unlock_irq(&connection->lock);
420 ret = gb_connection_hd_cport_enable(connection);
424 ret = gb_connection_svc_connection_create(connection);
426 goto err_hd_cport_disable;
428 spin_lock_irq(&connection->lock);
429 connection->handler = handler;
431 connection->state = GB_CONNECTION_STATE_ENABLED;
433 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
434 spin_unlock_irq(&connection->lock);
436 ret = gb_connection_control_connected(connection);
438 goto err_svc_destroy;
441 mutex_unlock(&connection->mutex);
446 spin_lock_irq(&connection->lock);
447 connection->state = GB_CONNECTION_STATE_DISABLED;
448 gb_connection_cancel_operations(connection, -ESHUTDOWN);
449 connection->handler = NULL;
450 spin_unlock_irq(&connection->lock);
452 gb_connection_svc_connection_destroy(connection);
453 err_hd_cport_disable:
454 gb_connection_hd_cport_disable(connection);
456 mutex_unlock(&connection->mutex);
460 EXPORT_SYMBOL_GPL(gb_connection_enable);
462 void gb_connection_disable_rx(struct gb_connection *connection)
464 mutex_lock(&connection->mutex);
466 spin_lock_irq(&connection->lock);
467 if (connection->state != GB_CONNECTION_STATE_ENABLED) {
468 spin_unlock_irq(&connection->lock);
471 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
472 gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
473 connection->handler = NULL;
474 spin_unlock_irq(&connection->lock);
477 mutex_unlock(&connection->mutex);
480 void gb_connection_disable(struct gb_connection *connection)
482 mutex_lock(&connection->mutex);
484 if (connection->state == GB_CONNECTION_STATE_DISABLED)
487 gb_connection_control_disconnected(connection);
489 spin_lock_irq(&connection->lock);
490 connection->state = GB_CONNECTION_STATE_DISABLED;
491 gb_connection_cancel_operations(connection, -ESHUTDOWN);
492 connection->handler = NULL;
493 spin_unlock_irq(&connection->lock);
495 gb_connection_svc_connection_destroy(connection);
496 gb_connection_hd_cport_disable(connection);
499 mutex_unlock(&connection->mutex);
501 EXPORT_SYMBOL_GPL(gb_connection_disable);
504 * Tear down a previously set up connection.
506 void gb_connection_destroy(struct gb_connection *connection)
513 spin_lock_irq(&gb_connections_lock);
514 list_del(&connection->bundle_links);
515 list_del(&connection->hd_links);
516 spin_unlock_irq(&gb_connections_lock);
518 id_map = &connection->hd->cport_id_map;
519 ida_simple_remove(id_map, connection->hd_cport_id);
520 connection->hd_cport_id = CPORT_ID_BAD;
522 kref_put_mutex(&connection->kref, gb_connection_kref_release,
526 void gb_connection_latency_tag_enable(struct gb_connection *connection)
528 struct gb_host_device *hd = connection->hd;
531 if (!hd->driver->latency_tag_enable)
534 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
536 dev_err(&connection->hd->dev,
537 "%s: failed to enable latency tag: %d\n",
538 connection->name, ret);
541 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
543 void gb_connection_latency_tag_disable(struct gb_connection *connection)
545 struct gb_host_device *hd = connection->hd;
548 if (!hd->driver->latency_tag_disable)
551 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
553 dev_err(&connection->hd->dev,
554 "%s: failed to disable latency tag: %d\n",
555 connection->name, ret);
558 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);