/*
 * Greybus connection code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/workqueue.h>

#include "greybus.h"
static void gb_connection_kref_release(struct kref *kref);

/* Protects the per-host-device connection lists. */
static DEFINE_SPINLOCK(gb_connections_lock);
/* Serialises connection create/destroy. */
static DEFINE_MUTEX(gb_connection_mutex);
22 /* Caller holds gb_connection_mutex. */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
26 struct gb_host_device *hd = intf->hd;
27 struct gb_connection *connection;
29 list_for_each_entry(connection, &hd->connections, hd_links) {
30 if (connection->intf == intf &&
31 connection->intf_cport_id == cport_id)
38 static void gb_connection_get(struct gb_connection *connection)
40 kref_get(&connection->kref);
43 static void gb_connection_put(struct gb_connection *connection)
45 kref_put(&connection->kref, gb_connection_kref_release);
49 * Returns a reference-counted pointer to the connection if found.
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
54 struct gb_connection *connection;
57 spin_lock_irqsave(&gb_connections_lock, flags);
58 list_for_each_entry(connection, &hd->connections, hd_links)
59 if (connection->hd_cport_id == cport_id) {
60 gb_connection_get(connection);
65 spin_unlock_irqrestore(&gb_connections_lock, flags);
71 * Callback from the host driver to let us know that data has been
72 * received on the bundle.
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75 u8 *data, size_t length)
77 struct gb_connection *connection;
79 connection = gb_connection_hd_find(hd, cport_id);
82 "nonexistent connection (%zu bytes dropped)\n", length);
85 gb_connection_recv(connection, data, length);
86 gb_connection_put(connection);
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
90 static void gb_connection_kref_release(struct kref *kref)
92 struct gb_connection *connection;
94 connection = container_of(kref, struct gb_connection, kref);
99 static void gb_connection_init_name(struct gb_connection *connection)
101 u16 hd_cport_id = connection->hd_cport_id;
105 if (connection->intf) {
106 intf_id = connection->intf->interface_id;
107 cport_id = connection->intf_cport_id;
110 snprintf(connection->name, sizeof(connection->name),
111 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
115 * _gb_connection_create() - create a Greybus connection
116 * @hd: host device of the connection
117 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
118 * @intf: remote interface, or NULL for static connections
119 * @bundle: remote-interface bundle (may be NULL)
120 * @cport_id: remote-interface cport id, or 0 for static connections
121 * @handler: request handler (may be NULL)
123 * Create a Greybus connection, representing the bidirectional link
124 * between a CPort on a (local) Greybus host device and a CPort on
125 * another Greybus interface.
127 * A connection also maintains the state of operations sent over the
130 * Serialised against concurrent create and destroy using the
131 * gb_connection_mutex.
133 * Return: A pointer to the new connection if successful, or an ERR_PTR
136 static struct gb_connection *
137 _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
138 struct gb_interface *intf,
139 struct gb_bundle *bundle, int cport_id,
140 gb_request_handler_t handler)
142 struct gb_connection *connection;
143 struct ida *id_map = &hd->cport_id_map;
144 int ida_start, ida_end;
147 if (hd_cport_id < 0) {
149 ida_end = hd->num_cports;
150 } else if (hd_cport_id < hd->num_cports) {
151 ida_start = hd_cport_id;
152 ida_end = hd_cport_id + 1;
154 dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
155 return ERR_PTR(-EINVAL);
158 mutex_lock(&gb_connection_mutex);
160 if (intf && gb_connection_intf_find(intf, cport_id)) {
161 dev_err(&intf->dev, "cport %u already in use\n", cport_id);
166 ret = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
171 connection = kzalloc(sizeof(*connection), GFP_KERNEL);
177 connection->hd_cport_id = hd_cport_id;
178 connection->intf_cport_id = cport_id;
180 connection->intf = intf;
181 connection->bundle = bundle;
182 connection->handler = handler;
183 connection->state = GB_CONNECTION_STATE_DISABLED;
185 atomic_set(&connection->op_cycle, 0);
186 mutex_init(&connection->mutex);
187 spin_lock_init(&connection->lock);
188 INIT_LIST_HEAD(&connection->operations);
190 connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
191 dev_name(&hd->dev), hd_cport_id);
192 if (!connection->wq) {
194 goto err_free_connection;
197 kref_init(&connection->kref);
199 gb_connection_init_name(connection);
201 spin_lock_irq(&gb_connections_lock);
202 list_add(&connection->hd_links, &hd->connections);
205 list_add(&connection->bundle_links, &bundle->connections);
207 INIT_LIST_HEAD(&connection->bundle_links);
209 spin_unlock_irq(&gb_connections_lock);
211 mutex_unlock(&gb_connection_mutex);
218 ida_simple_remove(id_map, hd_cport_id);
220 mutex_unlock(&gb_connection_mutex);
225 struct gb_connection *
226 gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
227 gb_request_handler_t handler)
229 return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler);
232 struct gb_connection *
233 gb_connection_create_control(struct gb_interface *intf)
235 return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL);
238 struct gb_connection *
239 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
240 gb_request_handler_t handler)
242 struct gb_interface *intf = bundle->intf;
244 return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
247 EXPORT_SYMBOL_GPL(gb_connection_create);
249 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
251 struct gb_host_device *hd = connection->hd;
254 if (!hd->driver->cport_enable)
257 ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
260 "failed to enable host cport: %d\n", ret);
267 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
269 struct gb_host_device *hd = connection->hd;
271 if (!hd->driver->cport_disable)
274 hd->driver->cport_disable(hd, connection->hd_cport_id);
278 * Request the SVC to create a connection from AP's cport to interface's
282 gb_connection_svc_connection_create(struct gb_connection *connection)
284 struct gb_host_device *hd = connection->hd;
285 struct gb_interface *intf;
288 if (gb_connection_is_static(connection))
291 intf = connection->intf;
292 ret = gb_svc_connection_create(hd->svc,
294 connection->hd_cport_id,
296 connection->intf_cport_id,
297 intf->boot_over_unipro);
299 dev_err(&connection->hd->dev,
300 "%s: failed to create svc connection: %d\n",
301 connection->name, ret);
309 gb_connection_svc_connection_destroy(struct gb_connection *connection)
311 if (gb_connection_is_static(connection))
314 gb_svc_connection_destroy(connection->hd->svc,
315 connection->hd->svc->ap_intf_id,
316 connection->hd_cport_id,
317 connection->intf->interface_id,
318 connection->intf_cport_id);
321 /* Inform Interface about active CPorts */
322 static int gb_connection_control_connected(struct gb_connection *connection)
324 struct gb_control *control;
325 u16 cport_id = connection->intf_cport_id;
328 if (gb_connection_is_static(connection))
331 control = connection->intf->control;
333 if (connection == control->connection)
336 ret = gb_control_connected_operation(control, cport_id);
338 dev_err(&connection->bundle->dev,
339 "failed to connect cport: %d\n", ret);
346 /* Inform Interface about inactive CPorts */
348 gb_connection_control_disconnected(struct gb_connection *connection)
350 struct gb_control *control;
351 u16 cport_id = connection->intf_cport_id;
354 if (gb_connection_is_static(connection))
357 control = connection->intf->control;
359 if (connection == control->connection)
362 ret = gb_control_disconnected_operation(control, cport_id);
364 dev_warn(&connection->bundle->dev,
365 "failed to disconnect cport: %d\n", ret);
370 * Cancel all active operations on a connection.
372 * Locking: Called with connection lock held and state set to DISABLED.
374 static void gb_connection_cancel_operations(struct gb_connection *connection,
377 struct gb_operation *operation;
379 while (!list_empty(&connection->operations)) {
380 operation = list_last_entry(&connection->operations,
381 struct gb_operation, links);
382 gb_operation_get(operation);
383 spin_unlock_irq(&connection->lock);
385 if (gb_operation_is_incoming(operation))
386 gb_operation_cancel_incoming(operation, errno);
388 gb_operation_cancel(operation, errno);
390 gb_operation_put(operation);
392 spin_lock_irq(&connection->lock);
397 * Cancel all active incoming operations on a connection.
399 * Locking: Called with connection lock held and state set to ENABLED_TX.
402 gb_connection_flush_incoming_operations(struct gb_connection *connection,
405 struct gb_operation *operation;
408 while (!list_empty(&connection->operations)) {
410 list_for_each_entry(operation, &connection->operations,
412 if (gb_operation_is_incoming(operation)) {
413 gb_operation_get(operation);
422 spin_unlock_irq(&connection->lock);
424 /* FIXME: flush, not cancel? */
425 gb_operation_cancel_incoming(operation, errno);
426 gb_operation_put(operation);
428 spin_lock_irq(&connection->lock);
433 * _gb_connection_enable() - enable a connection
434 * @connection: connection to enable
435 * @rx: whether to enable incoming requests
437 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
438 * ENABLED_TX->ENABLED state transitions.
440 * Locking: Caller holds connection->mutex.
442 static int _gb_connection_enable(struct gb_connection *connection, bool rx)
446 /* Handle ENABLED_TX -> ENABLED transitions. */
447 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
448 if (!(connection->handler && rx))
451 spin_lock_irq(&connection->lock);
452 connection->state = GB_CONNECTION_STATE_ENABLED;
453 spin_unlock_irq(&connection->lock);
458 ret = gb_connection_hd_cport_enable(connection);
462 ret = gb_connection_svc_connection_create(connection);
464 goto err_hd_cport_disable;
466 spin_lock_irq(&connection->lock);
467 if (connection->handler && rx)
468 connection->state = GB_CONNECTION_STATE_ENABLED;
470 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
471 spin_unlock_irq(&connection->lock);
473 ret = gb_connection_control_connected(connection);
475 goto err_svc_destroy;
480 spin_lock_irq(&connection->lock);
481 connection->state = GB_CONNECTION_STATE_DISABLED;
482 gb_connection_cancel_operations(connection, -ESHUTDOWN);
483 spin_unlock_irq(&connection->lock);
485 gb_connection_svc_connection_destroy(connection);
486 err_hd_cport_disable:
487 gb_connection_hd_cport_disable(connection);
492 int gb_connection_enable(struct gb_connection *connection)
496 mutex_lock(&connection->mutex);
498 if (connection->state == GB_CONNECTION_STATE_ENABLED)
501 ret = _gb_connection_enable(connection, true);
503 mutex_unlock(&connection->mutex);
507 EXPORT_SYMBOL_GPL(gb_connection_enable);
509 int gb_connection_enable_tx(struct gb_connection *connection)
513 mutex_lock(&connection->mutex);
515 if (connection->state == GB_CONNECTION_STATE_ENABLED) {
520 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
523 ret = _gb_connection_enable(connection, false);
525 mutex_unlock(&connection->mutex);
529 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
531 void gb_connection_disable_rx(struct gb_connection *connection)
533 mutex_lock(&connection->mutex);
535 spin_lock_irq(&connection->lock);
536 if (connection->state != GB_CONNECTION_STATE_ENABLED) {
537 spin_unlock_irq(&connection->lock);
540 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
541 gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
542 spin_unlock_irq(&connection->lock);
545 mutex_unlock(&connection->mutex);
548 void gb_connection_disable(struct gb_connection *connection)
550 mutex_lock(&connection->mutex);
552 if (connection->state == GB_CONNECTION_STATE_DISABLED)
555 gb_connection_control_disconnected(connection);
557 spin_lock_irq(&connection->lock);
558 connection->state = GB_CONNECTION_STATE_DISABLED;
559 gb_connection_cancel_operations(connection, -ESHUTDOWN);
560 spin_unlock_irq(&connection->lock);
562 gb_connection_svc_connection_destroy(connection);
563 gb_connection_hd_cport_disable(connection);
566 mutex_unlock(&connection->mutex);
568 EXPORT_SYMBOL_GPL(gb_connection_disable);
570 /* Caller must have disabled the connection before destroying it. */
571 void gb_connection_destroy(struct gb_connection *connection)
578 mutex_lock(&gb_connection_mutex);
580 spin_lock_irq(&gb_connections_lock);
581 list_del(&connection->bundle_links);
582 list_del(&connection->hd_links);
583 spin_unlock_irq(&gb_connections_lock);
585 destroy_workqueue(connection->wq);
587 id_map = &connection->hd->cport_id_map;
588 ida_simple_remove(id_map, connection->hd_cport_id);
589 connection->hd_cport_id = CPORT_ID_BAD;
591 mutex_unlock(&gb_connection_mutex);
593 gb_connection_put(connection);
595 EXPORT_SYMBOL_GPL(gb_connection_destroy);
597 void gb_connection_latency_tag_enable(struct gb_connection *connection)
599 struct gb_host_device *hd = connection->hd;
602 if (!hd->driver->latency_tag_enable)
605 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
607 dev_err(&connection->hd->dev,
608 "%s: failed to enable latency tag: %d\n",
609 connection->name, ret);
612 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
614 void gb_connection_latency_tag_disable(struct gb_connection *connection)
616 struct gb_host_device *hd = connection->hd;
619 if (!hd->driver->latency_tag_disable)
622 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
624 dev_err(&connection->hd->dev,
625 "%s: failed to disable latency tag: %d\n",
626 connection->name, ret);
629 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);