4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
7 * Released under the GPLv2 only.
10 #include <linux/workqueue.h>
/* Forward declaration: release callback invoked when a connection's kref drops to zero. */
15 static void gb_connection_kref_release(struct kref *kref);
/* Protects the hd->connections / bundle->connections lists; taken with irq-disabling variants below. */
18 static DEFINE_SPINLOCK(gb_connections_lock);
/* Serialises connection creation and destruction (see _gb_connection_create / gb_connection_destroy). */
19 static DEFINE_MUTEX(gb_connection_mutex);
22 /* Caller holds gb_connection_mutex. */
/*
 * Find the connection on interface @intf whose interface-side cport id is
 * @cport_id by walking the host device's connection list.
 * NOTE(review): the match/return path is not visible in this excerpt;
 * presumably returns the matching connection or NULL — confirm upstream.
 */
23 static struct gb_connection *
24 gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
26 struct gb_host_device *hd = intf->hd;
27 struct gb_connection *connection;
29 list_for_each_entry(connection, &hd->connections, hd_links) {
30 if (connection->intf == intf &&
31 connection->intf_cport_id == cport_id)
38 static void gb_connection_get(struct gb_connection *connection)
40 kref_get(&connection->kref);
43 static void gb_connection_put(struct gb_connection *connection)
45 kref_put(&connection->kref, gb_connection_kref_release);
49 * Returns a reference-counted pointer to the connection if found.
/*
 * Look up the connection on host device @hd with host-side cport id
 * @cport_id.  The list walk is done under gb_connections_lock with
 * interrupts disabled, and a reference is taken on a match before the
 * lock is released so the caller can safely use it.
 */
51 static struct gb_connection *
52 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
54 struct gb_connection *connection;
57 spin_lock_irqsave(&gb_connections_lock, flags);
58 list_for_each_entry(connection, &hd->connections, hd_links)
59 if (connection->hd_cport_id == cport_id) {
/* Pin the match while still holding the list lock. */
60 gb_connection_get(connection);
65 spin_unlock_irqrestore(&gb_connections_lock, flags);
71 * Callback from the host driver to let us know that data has been
72 * received on the bundle.
/*
 * RX entry point: map (hd, cport_id) to a connection (taking a reference),
 * hand the payload to gb_connection_recv(), and drop the reference.  Data
 * arriving for a nonexistent connection is logged and dropped.
 */
74 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
75 u8 *data, size_t length)
77 struct gb_connection *connection;
79 connection = gb_connection_hd_find(hd, cport_id);
82 "nonexistent connection (%zu bytes dropped)\n", length);
85 gb_connection_recv(connection, data, length);
/* Balance the reference taken by gb_connection_hd_find(). */
86 gb_connection_put(connection);
88 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
/*
 * Last-reference release callback for a connection's kref.  Recovers the
 * enclosing gb_connection from the embedded kref; the actual teardown/free
 * is not visible in this excerpt — presumably kfree(connection), confirm.
 */
90 static void gb_connection_kref_release(struct kref *kref)
92 struct gb_connection *connection;
94 connection = container_of(kref, struct gb_connection, kref);
/*
 * Build the human-readable connection name "<hd_cport>/<intf>:<cport>".
 * For static connections (no intf) the interface/cport fields keep their
 * defaults (declarations not visible in this excerpt — presumably 0).
 */
99 static void gb_connection_init_name(struct gb_connection *connection)
101 u16 hd_cport_id = connection->hd_cport_id;
105 if (connection->intf) {
106 intf_id = connection->intf->interface_id;
107 cport_id = connection->intf_cport_id;
110 snprintf(connection->name, sizeof(connection->name),
111 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
115 * _gb_connection_create() - create a Greybus connection
116 * @hd: host device of the connection
117 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
118 * @intf: remote interface, or NULL for static connections
119 * @bundle: remote-interface bundle (may be NULL)
120 * @cport_id: remote-interface cport id, or 0 for static connections
121 * @handler: request handler (may be NULL)
122 * @flags: connection flags
124 * Create a Greybus connection, representing the bidirectional link
125 * between a CPort on a (local) Greybus host device and a CPort on
126 * another Greybus interface.
128 * A connection also maintains the state of operations sent over the
131 * Serialised against concurrent create and destroy using the
132 * gb_connection_mutex.
134 * Return: A pointer to the new connection if successful, or an ERR_PTR
137 static struct gb_connection *
138 _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
139 struct gb_interface *intf,
140 struct gb_bundle *bundle, int cport_id,
141 gb_request_handler_t handler,
144 struct gb_connection *connection;
145 struct ida *id_map = &hd->cport_id_map;
146 int ida_start, ida_end;
/* Pick the IDA range: full [?, num_cports) for dynamic allocation, ... */
149 if (hd_cport_id < 0) {
151 ida_end = hd->num_cports;
/* ... or the single requested id when it is in range. */
152 } else if (hd_cport_id < hd->num_cports) {
153 ida_start = hd_cport_id;
154 ida_end = hd_cport_id + 1;
156 dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
157 return ERR_PTR(-EINVAL);
/* Serialise against concurrent create/destroy. */
160 mutex_lock(&gb_connection_mutex);
/* Reject a duplicate interface cport. */
162 if (intf && gb_connection_intf_find(intf, cport_id)) {
163 dev_err(&intf->dev, "cport %u already in use\n", cport_id);
/* Reserve a host cport id from the selected range. */
168 ret = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
173 connection = kzalloc(sizeof(*connection), GFP_KERNEL);
179 connection->hd_cport_id = hd_cport_id;
180 connection->intf_cport_id = cport_id;
182 connection->intf = intf;
183 connection->bundle = bundle;
184 connection->handler = handler;
185 connection->flags = flags;
/* New connections start disabled; gb_connection_enable() transitions state. */
186 connection->state = GB_CONNECTION_STATE_DISABLED;
188 atomic_set(&connection->op_cycle, 0);
189 mutex_init(&connection->mutex);
190 spin_lock_init(&connection->lock);
191 INIT_LIST_HEAD(&connection->operations);
/* Ordered (max_active = 1) unbound workqueue for this connection's work. */
193 connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
194 dev_name(&hd->dev), hd_cport_id);
195 if (!connection->wq) {
197 goto err_free_connection;
200 kref_init(&connection->kref);
202 gb_connection_init_name(connection);
/* Publish on the hd (and bundle, if any) lists under the global spinlock. */
204 spin_lock_irq(&gb_connections_lock);
205 list_add(&connection->hd_links, &hd->connections);
208 list_add(&connection->bundle_links, &bundle->connections);
/* No bundle: keep bundle_links as an empty list so list_del() is safe. */
210 INIT_LIST_HEAD(&connection->bundle_links);
212 spin_unlock_irq(&gb_connections_lock);
214 mutex_unlock(&gb_connection_mutex);
/* Error unwind: give back the reserved cport id and release the mutex. */
221 ida_simple_remove(id_map, hd_cport_id);
223 mutex_unlock(&gb_connection_mutex);
/*
 * Create a static connection: fixed host cport id, no remote interface or
 * bundle, remote cport 0.  Trailing flags argument is not visible in this
 * excerpt — presumably 0, confirm upstream.
 */
228 struct gb_connection *
229 gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
230 gb_request_handler_t handler)
232 return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
236 struct gb_connection *
237 gb_connection_create_control(struct gb_interface *intf)
239 return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, 0);
/*
 * Create a bundle connection with a dynamically allocated host cport.
 * The trailing flags argument is not visible in this excerpt — presumably
 * the handler followed by 0, confirm upstream.
 */
242 struct gb_connection *
243 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
244 gb_request_handler_t handler)
246 struct gb_interface *intf = bundle->intf;
248 return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
251 EXPORT_SYMBOL_GPL(gb_connection_create);
/*
 * Like gb_connection_create() but lets the caller supply connection flags
 * (the flags parameter line and trailing arguments are not visible in this
 * excerpt).
 */
253 struct gb_connection *
254 gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
255 gb_request_handler_t handler,
258 struct gb_interface *intf = bundle->intf;
260 return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
263 EXPORT_SYMBOL_GPL(gb_connection_create_flags);
/*
 * Enable the host-device cport backing @connection via the host driver's
 * optional cport_enable callback; a missing callback is treated as success.
 * Logs and propagates the driver's error code on failure.
 */
265 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
267 struct gb_host_device *hd = connection->hd;
270 if (!hd->driver->cport_enable)
273 ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
276 "failed to enable host cport: %d\n", ret);
283 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
285 struct gb_host_device *hd = connection->hd;
287 if (!hd->driver->cport_disable)
290 hd->driver->cport_disable(hd, connection->hd_cport_id);
/*
 * Enable FCT (flow-control token) flow for @connection's host cport via the
 * host driver's optional fct_flow_enable callback; absence of the callback
 * is treated as success.  Logs the error on failure.
 */
293 static int gb_connection_hd_fct_flow_enable(struct gb_connection *connection)
295 struct gb_host_device *hd = connection->hd;
298 if (!hd->driver->fct_flow_enable)
301 ret = hd->driver->fct_flow_enable(hd, connection->hd_cport_id);
303 dev_err(&hd->dev, "%s: failed to enable FCT flow: %d\n",
304 connection->name, ret);
311 static void gb_connection_hd_fct_flow_disable(struct gb_connection *connection)
313 struct gb_host_device *hd = connection->hd;
315 if (!hd->driver->fct_flow_disable)
318 hd->driver->fct_flow_disable(hd, connection->hd_cport_id);
322 * Request the SVC to create a connection from AP's cport to interface's
326 gb_connection_svc_connection_create(struct gb_connection *connection)
328 struct gb_host_device *hd = connection->hd;
329 struct gb_interface *intf;
/* Static connections bypass the SVC entirely; just enable FCT flow. */
333 if (gb_connection_is_static(connection))
334 return gb_connection_hd_fct_flow_enable(connection);
336 intf = connection->intf;
338 /* The ES2/ES3 bootrom requires E2EFC, CSD and CSV to be disabled. */
339 cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
340 if (intf->boot_over_unipro) {
341 cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
/* Normal (non-bootrom) case: disable CSD but enable end-to-end flow control. */
343 cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
344 GB_SVC_CPORT_FLAG_E2EFC;
/* Ask the SVC to wire AP host cport <-> interface cport. */
347 ret = gb_svc_connection_create(hd->svc,
349 connection->hd_cport_id,
351 connection->intf_cport_id,
354 dev_err(&connection->hd->dev,
355 "%s: failed to create svc connection: %d\n",
356 connection->name, ret);
360 ret = gb_connection_hd_fct_flow_enable(connection);
/* FCT enable failed: tear the SVC connection back down. */
362 gb_svc_connection_destroy(hd->svc, hd->svc->ap_intf_id,
363 connection->hd_cport_id,
365 connection->intf_cport_id);
/*
 * Tear down the SVC-managed connection: disable FCT flow first, then, for
 * non-static connections, ask the SVC to destroy the AP<->interface link.
 * (The return-type line of this definition is not visible in this excerpt.)
 */
373 gb_connection_svc_connection_destroy(struct gb_connection *connection)
375 gb_connection_hd_fct_flow_disable(connection);
/* Static connections have no SVC connection to destroy. */
377 if (gb_connection_is_static(connection))
380 gb_svc_connection_destroy(connection->hd->svc,
381 connection->hd->svc->ap_intf_id,
382 connection->hd_cport_id,
383 connection->intf->interface_id,
384 connection->intf_cport_id);
387 /* Inform Interface about active CPorts */
/*
 * Send a control "connected" operation for this connection's interface
 * cport.  Skipped for static connections and for the control connection
 * itself (it cannot announce itself over itself).
 */
388 static int gb_connection_control_connected(struct gb_connection *connection)
390 struct gb_control *control;
391 u16 cport_id = connection->intf_cport_id;
394 if (gb_connection_is_static(connection))
397 control = connection->intf->control;
/* The control connection is implicitly connected; nothing to send. */
399 if (connection == control->connection)
402 ret = gb_control_connected_operation(control, cport_id);
404 dev_err(&connection->bundle->dev,
405 "failed to connect cport: %d\n", ret);
412 /* Inform Interface about inactive CPorts */
/*
 * Counterpart of gb_connection_control_connected(): sends the control
 * "disconnected" operation, with the same static/control-connection
 * exclusions.  Failure is only warned about — teardown proceeds anyway.
 * (The return-type line of this definition is not visible in this excerpt.)
 */
414 gb_connection_control_disconnected(struct gb_connection *connection)
416 struct gb_control *control;
417 u16 cport_id = connection->intf_cport_id;
420 if (gb_connection_is_static(connection))
423 control = connection->intf->control;
425 if (connection == control->connection)
428 ret = gb_control_disconnected_operation(control, cport_id);
430 dev_warn(&connection->bundle->dev,
431 "failed to disconnect cport: %d\n", ret);
436 * Cancel all active operations on a connection.
438 * Locking: Called with connection lock held and state set to DISABLED.
/*
 * Drain connection->operations, cancelling each with @errno.  The lock is
 * dropped around each cancel (which may sleep); a reference pins the
 * operation across the unlocked window.
 */
440 static void gb_connection_cancel_operations(struct gb_connection *connection,
442 __must_hold(&connection->lock)
444 struct gb_operation *operation;
446 while (!list_empty(&connection->operations)) {
/* Cancel newest-first. */
447 operation = list_last_entry(&connection->operations,
448 struct gb_operation, links)
449 gb_operation_get(operation);
450 spin_unlock_irq(&connection->lock);
/* Incoming and outgoing operations are cancelled via different paths. */
452 if (gb_operation_is_incoming(operation))
453 gb_operation_cancel_incoming(operation, errno);
455 gb_operation_cancel(operation, errno);
457 gb_operation_put(operation);
/* Retake the lock before re-checking the list. */
459 spin_lock_irq(&connection->lock);
464 * Cancel all active incoming operations on a connection.
466 * Locking: Called with connection lock held and state set to ENABLED_TX.
/*
 * Like gb_connection_cancel_operations() but only cancels incoming
 * operations, leaving outgoing ones in flight.  Same get/unlock/cancel/put
 * pattern around each cancellation.
 */
469 gb_connection_flush_incoming_operations(struct gb_connection *connection,
471 __must_hold(&connection->lock)
473 struct gb_operation *operation;
476 while (!list_empty(&connection->operations)) {
478 list_for_each_entry(operation, &connection->operations,
480 if (gb_operation_is_incoming(operation)) {
481 gb_operation_get(operation);
490 spin_unlock_irq(&connection->lock);
492 /* FIXME: flush, not cancel? */
493 gb_operation_cancel_incoming(operation, errno);
494 gb_operation_put(operation);
496 spin_lock_irq(&connection->lock);
501 * _gb_connection_enable() - enable a connection
502 * @connection: connection to enable
503 * @rx: whether to enable incoming requests
505 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
506 * ENABLED_TX->ENABLED state transitions.
508 * Locking: Caller holds connection->mutex.
510 static int _gb_connection_enable(struct gb_connection *connection, bool rx)
514 /* Handle ENABLED_TX -> ENABLED transitions. */
515 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
/* Without a handler (or with rx false) there is nothing more to enable. */
516 if (!(connection->handler && rx))
519 spin_lock_irq(&connection->lock);
520 connection->state = GB_CONNECTION_STATE_ENABLED;
521 spin_unlock_irq(&connection->lock);
/* DISABLED -> ENABLED(_TX): bring up host cport, then the SVC connection. */
526 ret = gb_connection_hd_cport_enable(connection);
530 ret = gb_connection_svc_connection_create(connection);
532 goto err_hd_cport_disable;
/* RX is only enabled when a request handler exists and rx was requested. */
534 spin_lock_irq(&connection->lock);
535 if (connection->handler && rx)
536 connection->state = GB_CONNECTION_STATE_ENABLED;
538 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
539 spin_unlock_irq(&connection->lock);
541 ret = gb_connection_control_connected(connection);
543 goto err_svc_destroy;
/* Error unwind: back to DISABLED, cancel in-flight operations, tear down. */
548 spin_lock_irq(&connection->lock);
549 connection->state = GB_CONNECTION_STATE_DISABLED;
550 gb_connection_cancel_operations(connection, -ESHUTDOWN);
551 spin_unlock_irq(&connection->lock);
553 gb_connection_svc_connection_destroy(connection);
554 err_hd_cport_disable:
555 gb_connection_hd_cport_disable(connection);
/*
 * Fully enable @connection (TX and RX).  A no-op when already ENABLED;
 * otherwise delegates to _gb_connection_enable(connection, true) under
 * connection->mutex.
 */
560 int gb_connection_enable(struct gb_connection *connection)
564 mutex_lock(&connection->mutex);
566 if (connection->state == GB_CONNECTION_STATE_ENABLED)
569 ret = _gb_connection_enable(connection, true);
571 mutex_unlock(&connection->mutex);
575 EXPORT_SYMBOL_GPL(gb_connection_enable);
/*
 * Enable @connection for TX only.  Already being fully ENABLED is treated
 * specially (error path not fully visible in this excerpt); already being
 * ENABLED_TX is a no-op.  Otherwise _gb_connection_enable(connection, false)
 * is called under connection->mutex.
 */
577 int gb_connection_enable_tx(struct gb_connection *connection)
581 mutex_lock(&connection->mutex);
583 if (connection->state == GB_CONNECTION_STATE_ENABLED) {
588 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
591 ret = _gb_connection_enable(connection, false);
593 mutex_unlock(&connection->mutex);
597 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
/*
 * Demote a fully ENABLED connection to ENABLED_TX: flip the state under the
 * connection spinlock and flush any in-flight incoming operations.  Does
 * nothing when the connection is not currently ENABLED.
 */
599 void gb_connection_disable_rx(struct gb_connection *connection)
601 mutex_lock(&connection->mutex);
603 spin_lock_irq(&connection->lock);
604 if (connection->state != GB_CONNECTION_STATE_ENABLED) {
605 spin_unlock_irq(&connection->lock);
608 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
/* Incoming operations are cancelled; outgoing ones keep running. */
609 gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
610 spin_unlock_irq(&connection->lock);
613 mutex_unlock(&connection->mutex);
/*
 * Fully disable @connection: notify the remote end (control disconnected),
 * move to DISABLED while cancelling all operations, then tear down the SVC
 * connection and the host cport.  No-op when already DISABLED.
 */
616 void gb_connection_disable(struct gb_connection *connection)
618 mutex_lock(&connection->mutex);
620 if (connection->state == GB_CONNECTION_STATE_DISABLED)
/* Tell the interface this cport is going away before cancelling traffic. */
623 gb_connection_control_disconnected(connection);
625 spin_lock_irq(&connection->lock);
626 connection->state = GB_CONNECTION_STATE_DISABLED;
627 gb_connection_cancel_operations(connection, -ESHUTDOWN);
628 spin_unlock_irq(&connection->lock);
630 gb_connection_svc_connection_destroy(connection);
631 gb_connection_hd_cport_disable(connection);
634 mutex_unlock(&connection->mutex);
636 EXPORT_SYMBOL_GPL(gb_connection_disable);
638 /* Caller must have disabled the connection before destroying it. */
/*
 * Unpublish and destroy @connection: remove it from the bundle and hd lists
 * under gb_connections_lock, destroy its workqueue, return its host cport id
 * to the IDA, and drop the final creation reference (the struct itself is
 * freed by the kref release callback).  Serialised by gb_connection_mutex.
 */
639 void gb_connection_destroy(struct gb_connection *connection)
646 mutex_lock(&gb_connection_mutex);
648 spin_lock_irq(&gb_connections_lock);
649 list_del(&connection->bundle_links);
650 list_del(&connection->hd_links);
651 spin_unlock_irq(&gb_connections_lock);
653 destroy_workqueue(connection->wq);
655 id_map = &connection->hd->cport_id_map;
656 ida_simple_remove(id_map, connection->hd_cport_id);
/* Poison the cport id so late users of the connection are detectable. */
657 connection->hd_cport_id = CPORT_ID_BAD;
659 mutex_unlock(&gb_connection_mutex);
661 gb_connection_put(connection);
663 EXPORT_SYMBOL_GPL(gb_connection_destroy);
/*
 * Enable latency tagging on @connection's host cport via the host driver's
 * optional latency_tag_enable callback; silently returns when the driver
 * does not implement it, and only logs on failure.
 */
665 void gb_connection_latency_tag_enable(struct gb_connection *connection)
667 struct gb_host_device *hd = connection->hd;
670 if (!hd->driver->latency_tag_enable)
673 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
675 dev_err(&connection->hd->dev,
676 "%s: failed to enable latency tag: %d\n",
677 connection->name, ret);
680 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
/*
 * Counterpart of gb_connection_latency_tag_enable(): disables latency
 * tagging via the optional latency_tag_disable callback, logging on failure.
 */
682 void gb_connection_latency_tag_disable(struct gb_connection *connection)
684 struct gb_host_device *hd = connection->hd;
687 if (!hd->driver->latency_tag_disable)
690 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
692 dev_err(&connection->hd->dev,
693 "%s: failed to disable latency tag: %d\n",
694 connection->name, ret);
697 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);