/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/input.h>
#include <linux/workqueue.h>

/* Keycode reported to the input layer for the Ara module-release button. */
#define SVC_KEY_ARA_BUTTON	KEY_A

/* Eject pulse is long; operation timeout in ms (see gb_svc_intf_eject()). */
#define SVC_INTF_EJECT_TIMEOUT	9000
17 struct gb_svc_deferred_request {
18 struct work_struct work;
19 struct gb_operation *operation;
23 static ssize_t endo_id_show(struct device *dev,
24 struct device_attribute *attr, char *buf)
26 struct gb_svc *svc = to_gb_svc(dev);
28 return sprintf(buf, "0x%04x\n", svc->endo_id);
30 static DEVICE_ATTR_RO(endo_id);
32 static ssize_t ap_intf_id_show(struct device *dev,
33 struct device_attribute *attr, char *buf)
35 struct gb_svc *svc = to_gb_svc(dev);
37 return sprintf(buf, "%u\n", svc->ap_intf_id);
39 static DEVICE_ATTR_RO(ap_intf_id);
43 // This is a hack, we need to do this "right" and clean the interface up
44 // properly, not just forcibly yank the thing out of the system and hope for the
45 // best. But for now, people want their modules to come out without having to
46 // throw the thing to the ground or get out a screwdriver.
47 static ssize_t intf_eject_store(struct device *dev,
48 struct device_attribute *attr, const char *buf,
51 struct gb_svc *svc = to_gb_svc(dev);
52 unsigned short intf_id;
55 ret = kstrtou16(buf, 10, &intf_id);
59 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
61 ret = gb_svc_intf_eject(svc, intf_id);
67 static DEVICE_ATTR_WO(intf_eject);
69 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
72 struct gb_svc *svc = to_gb_svc(dev);
74 return sprintf(buf, "%s\n",
75 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
78 static ssize_t watchdog_store(struct device *dev,
79 struct device_attribute *attr, const char *buf,
82 struct gb_svc *svc = to_gb_svc(dev);
86 retval = strtobool(buf, &user_request);
91 retval = gb_svc_watchdog_enable(svc);
93 retval = gb_svc_watchdog_disable(svc);
98 static DEVICE_ATTR_RW(watchdog);
100 static struct attribute *svc_attrs[] = {
101 &dev_attr_endo_id.attr,
102 &dev_attr_ap_intf_id.attr,
103 &dev_attr_intf_eject.attr,
104 &dev_attr_watchdog.attr,
107 ATTRIBUTE_GROUPS(svc);
109 static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
111 struct gb_svc_intf_device_id_request request;
113 request.intf_id = intf_id;
114 request.device_id = device_id;
116 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
117 &request, sizeof(request), NULL, 0);
120 int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
122 struct gb_svc_intf_reset_request request;
124 request.intf_id = intf_id;
126 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
127 &request, sizeof(request), NULL, 0);
129 EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
131 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
133 struct gb_svc_intf_eject_request request;
135 request.intf_id = intf_id;
138 * The pulse width for module release in svc is long so we need to
139 * increase the timeout so the operation will not return to soon.
141 return gb_operation_sync_timeout(svc->connection,
142 GB_SVC_TYPE_INTF_EJECT, &request,
143 sizeof(request), NULL, 0,
146 EXPORT_SYMBOL_GPL(gb_svc_intf_eject);
148 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
151 struct gb_svc_dme_peer_get_request request;
152 struct gb_svc_dme_peer_get_response response;
156 request.intf_id = intf_id;
157 request.attr = cpu_to_le16(attr);
158 request.selector = cpu_to_le16(selector);
160 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
161 &request, sizeof(request),
162 &response, sizeof(response));
164 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
165 intf_id, attr, selector, ret);
169 result = le16_to_cpu(response.result_code);
171 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
172 intf_id, attr, selector, result);
177 *value = le32_to_cpu(response.attr_value);
181 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
183 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
186 struct gb_svc_dme_peer_set_request request;
187 struct gb_svc_dme_peer_set_response response;
191 request.intf_id = intf_id;
192 request.attr = cpu_to_le16(attr);
193 request.selector = cpu_to_le16(selector);
194 request.value = cpu_to_le32(value);
196 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
197 &request, sizeof(request),
198 &response, sizeof(response));
200 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
201 intf_id, attr, selector, value, ret);
205 result = le16_to_cpu(response.result_code);
207 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
208 intf_id, attr, selector, value, result);
214 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
217 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
218 * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
219 * reading a non-zero value from it.
221 * FIXME: This is module-hardware dependent and needs to be extended for every
222 * type of module we want to support.
224 static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
226 struct gb_host_device *hd = intf->hd;
233 * Check if the module is ES2 or ES3, and choose attr number
235 * FIXME: Remove ES2 support from the kernel entirely.
237 if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
238 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
239 attr = DME_ATTR_T_TST_SRC_INCREMENT;
241 attr = DME_ATTR_ES3_INIT_STATUS;
243 /* Read and clear boot status in ES3_INIT_STATUS */
244 ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
245 DME_ATTR_SELECTOR_INDEX, &value);
251 * A nonzero boot status indicates the module has finished
255 dev_err(&intf->dev, "Module not ready yet\n");
260 * Check if the module needs to boot from UniPro.
261 * For ES2: We need to check lowest 8 bits of 'value'.
262 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
263 * FIXME: Remove ES2 support from the kernel entirely.
265 if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
266 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
269 init_status = value >> 24;
271 if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
272 init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
273 intf->boot_over_unipro = true;
275 return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
276 DME_ATTR_SELECTOR_INDEX, 0);
279 int gb_svc_connection_create(struct gb_svc *svc,
280 u8 intf1_id, u16 cport1_id,
281 u8 intf2_id, u16 cport2_id,
284 struct gb_svc_conn_create_request request;
286 request.intf1_id = intf1_id;
287 request.cport1_id = cpu_to_le16(cport1_id);
288 request.intf2_id = intf2_id;
289 request.cport2_id = cpu_to_le16(cport2_id);
290 request.tc = 0; /* TC0 */
291 request.flags = cport_flags;
293 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
294 &request, sizeof(request), NULL, 0);
296 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
298 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
299 u8 intf2_id, u16 cport2_id)
301 struct gb_svc_conn_destroy_request request;
302 struct gb_connection *connection = svc->connection;
305 request.intf1_id = intf1_id;
306 request.cport1_id = cpu_to_le16(cport1_id);
307 request.intf2_id = intf2_id;
308 request.cport2_id = cpu_to_le16(cport2_id);
310 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
311 &request, sizeof(request), NULL, 0);
313 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
314 intf1_id, cport1_id, intf2_id, cport2_id, ret);
317 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
319 /* Creates bi-directional routes between the devices */
320 static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
321 u8 intf2_id, u8 dev2_id)
323 struct gb_svc_route_create_request request;
325 request.intf1_id = intf1_id;
326 request.dev1_id = dev1_id;
327 request.intf2_id = intf2_id;
328 request.dev2_id = dev2_id;
330 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
331 &request, sizeof(request), NULL, 0);
334 /* Destroys bi-directional routes between the devices */
335 static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
337 struct gb_svc_route_destroy_request request;
340 request.intf1_id = intf1_id;
341 request.intf2_id = intf2_id;
343 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
344 &request, sizeof(request), NULL, 0);
346 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
347 intf1_id, intf2_id, ret);
351 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
352 u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
353 u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
354 u8 flags, u32 quirks)
356 struct gb_svc_intf_set_pwrm_request request;
357 struct gb_svc_intf_set_pwrm_response response;
360 request.intf_id = intf_id;
361 request.hs_series = hs_series;
362 request.tx_mode = tx_mode;
363 request.tx_gear = tx_gear;
364 request.tx_nlanes = tx_nlanes;
365 request.rx_mode = rx_mode;
366 request.rx_gear = rx_gear;
367 request.rx_nlanes = rx_nlanes;
368 request.flags = flags;
369 request.quirks = cpu_to_le32(quirks);
371 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
372 &request, sizeof(request),
373 &response, sizeof(response));
377 return le16_to_cpu(response.result_code);
379 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
381 int gb_svc_ping(struct gb_svc *svc)
383 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
385 GB_OPERATION_TIMEOUT_DEFAULT * 2);
387 EXPORT_SYMBOL_GPL(gb_svc_ping);
389 static int gb_svc_version_request(struct gb_operation *op)
391 struct gb_connection *connection = op->connection;
392 struct gb_svc *svc = connection->private;
393 struct gb_protocol_version_request *request;
394 struct gb_protocol_version_response *response;
396 if (op->request->payload_size < sizeof(*request)) {
397 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
398 op->request->payload_size,
403 request = op->request->payload;
405 if (request->major > GB_SVC_VERSION_MAJOR) {
406 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
407 request->major, GB_SVC_VERSION_MAJOR);
411 svc->protocol_major = request->major;
412 svc->protocol_minor = request->minor;
414 if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
417 response = op->response->payload;
418 response->major = svc->protocol_major;
419 response->minor = svc->protocol_minor;
424 static int gb_svc_hello(struct gb_operation *op)
426 struct gb_connection *connection = op->connection;
427 struct gb_svc *svc = connection->private;
428 struct gb_svc_hello_request *hello_request;
431 if (op->request->payload_size < sizeof(*hello_request)) {
432 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
433 op->request->payload_size,
434 sizeof(*hello_request));
438 hello_request = op->request->payload;
439 svc->endo_id = le16_to_cpu(hello_request->endo_id);
440 svc->ap_intf_id = hello_request->interface_id;
442 ret = device_add(&svc->dev);
444 dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
448 ret = input_register_device(svc->input);
450 dev_err(&svc->dev, "failed to register input: %d\n", ret);
451 device_del(&svc->dev);
455 ret = gb_svc_watchdog_create(svc);
457 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
458 input_unregister_device(svc->input);
459 device_del(&svc->dev);
466 static int gb_svc_interface_route_create(struct gb_svc *svc,
467 struct gb_interface *intf)
469 u8 intf_id = intf->interface_id;
474 * Create a device id for the interface:
475 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
476 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
478 * XXX Do we need to allocate device ID for SVC or the AP here? And what
479 * XXX about an AP with multiple interface blocks?
481 ret = ida_simple_get(&svc->device_id_map,
482 GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
484 dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
490 ret = gb_svc_intf_device_id(svc, intf_id, device_id);
492 dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
493 device_id, intf_id, ret);
497 /* Create a two-way route between the AP and the new interface. */
498 ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
501 dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
502 intf_id, device_id, ret);
503 goto err_svc_id_free;
506 intf->device_id = device_id;
512 * XXX Should we tell SVC that this id doesn't belong to interface
516 ida_simple_remove(&svc->device_id_map, device_id);
521 static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
523 u8 intf_id = intf->interface_id;
524 u8 device_id = intf->device_id;
526 intf->disconnected = true;
528 gb_interface_remove(intf);
531 * Destroy the two-way route between the AP and the interface.
533 gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
535 ida_simple_remove(&svc->device_id_map, device_id);
538 static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
540 struct gb_svc_intf_hotplug_request *request;
541 struct gb_connection *connection = operation->connection;
542 struct gb_svc *svc = connection->private;
543 struct gb_host_device *hd = connection->hd;
544 struct gb_interface *intf;
550 /* The request message size has already been verified. */
551 request = operation->request->payload;
552 intf_id = request->intf_id;
554 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
556 intf = gb_interface_find(hd, intf_id);
559 * For ES2, we need to maintain the same vendor/product ids we
560 * got from bootrom, otherwise userspace can't distinguish
563 vendor_id = intf->vendor_id;
564 product_id = intf->product_id;
567 * We have received a hotplug request for an interface that
570 * This can happen in cases like:
571 * - bootrom loading the firmware image and booting into that,
572 * which only generates a hotplug event. i.e. no hot-unplug
574 * - Or the firmware on the module crashed and sent hotplug
575 * request again to the SVC, which got propagated to AP.
577 * Remove the interface and add it again, and let user know
578 * about this with a print message.
580 dev_info(&svc->dev, "removing interface %u to add it again\n",
582 gb_svc_intf_remove(svc, intf);
585 intf = gb_interface_create(hd, intf_id);
587 dev_err(&svc->dev, "failed to create interface %u\n",
592 intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
593 intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
594 intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
595 intf->product_id = le32_to_cpu(request->data.ara_prod_id);
596 intf->serial_number = le64_to_cpu(request->data.serial_number);
599 * Use VID/PID specified at hotplug if:
600 * - Bridge ASIC chip isn't ES2
601 * - Received non-zero Vendor/Product ids
603 * Otherwise, use the ids we received from bootrom.
605 if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
606 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID &&
607 intf->vendor_id == 0 && intf->product_id == 0) {
608 intf->vendor_id = vendor_id;
609 intf->product_id = product_id;
612 ret = gb_svc_read_and_clear_module_boot_status(intf);
614 dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
616 goto destroy_interface;
619 ret = gb_svc_interface_route_create(svc, intf);
621 goto destroy_interface;
623 ret = gb_interface_init(intf);
625 dev_err(&svc->dev, "failed to initialize interface %u: %d\n",
633 gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
634 ida_simple_remove(&svc->device_id_map, intf->device_id);
636 gb_interface_remove(intf);
639 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
641 struct gb_svc *svc = operation->connection->private;
642 struct gb_svc_intf_hot_unplug_request *request;
643 struct gb_host_device *hd = operation->connection->hd;
644 struct gb_interface *intf;
647 /* The request message size has already been verified. */
648 request = operation->request->payload;
649 intf_id = request->intf_id;
651 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
653 intf = gb_interface_find(hd, intf_id);
655 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
660 gb_svc_intf_remove(svc, intf);
663 static void gb_svc_process_deferred_request(struct work_struct *work)
665 struct gb_svc_deferred_request *dr;
666 struct gb_operation *operation;
670 dr = container_of(work, struct gb_svc_deferred_request, work);
671 operation = dr->operation;
672 svc = operation->connection->private;
673 type = operation->request->header->type;
676 case GB_SVC_TYPE_INTF_HOTPLUG:
677 gb_svc_process_intf_hotplug(operation);
679 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
680 gb_svc_process_intf_hot_unplug(operation);
683 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
686 gb_operation_put(operation);
690 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
692 struct gb_svc *svc = operation->connection->private;
693 struct gb_svc_deferred_request *dr;
695 dr = kmalloc(sizeof(*dr), GFP_KERNEL);
699 gb_operation_get(operation);
701 dr->operation = operation;
702 INIT_WORK(&dr->work, gb_svc_process_deferred_request);
704 queue_work(svc->wq, &dr->work);
710 * Bringing up a module can be time consuming, as that may require lots of
711 * initialization on the module side. Over that, we may also need to download
712 * the firmware first and flash that on the module.
714 * In order not to make other svc events wait for all this to finish,
715 * handle most of module hotplug stuff outside of the hotplug callback, with
716 * help of a workqueue.
718 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
720 struct gb_svc *svc = op->connection->private;
721 struct gb_svc_intf_hotplug_request *request;
723 if (op->request->payload_size < sizeof(*request)) {
724 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
725 op->request->payload_size, sizeof(*request));
729 request = op->request->payload;
731 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
733 return gb_svc_queue_deferred_request(op);
736 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
738 struct gb_svc *svc = op->connection->private;
739 struct gb_svc_intf_hot_unplug_request *request;
741 if (op->request->payload_size < sizeof(*request)) {
742 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
743 op->request->payload_size, sizeof(*request));
747 request = op->request->payload;
749 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
751 return gb_svc_queue_deferred_request(op);
754 static int gb_svc_intf_reset_recv(struct gb_operation *op)
756 struct gb_svc *svc = op->connection->private;
757 struct gb_message *request = op->request;
758 struct gb_svc_intf_reset_request *reset;
761 if (request->payload_size < sizeof(*reset)) {
762 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
763 request->payload_size, sizeof(*reset));
766 reset = request->payload;
768 intf_id = reset->intf_id;
770 /* FIXME Reset the interface here */
775 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
779 *code = SVC_KEY_ARA_BUTTON;
782 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
789 static int gb_svc_key_event_recv(struct gb_operation *op)
791 struct gb_svc *svc = op->connection->private;
792 struct gb_message *request = op->request;
793 struct gb_svc_key_event_request *key;
798 if (request->payload_size < sizeof(*key)) {
799 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
800 request->payload_size, sizeof(*key));
804 key = request->payload;
806 ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
810 event = key->key_event;
811 if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
812 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
816 input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
817 input_sync(svc->input);
822 static int gb_svc_request_handler(struct gb_operation *op)
824 struct gb_connection *connection = op->connection;
825 struct gb_svc *svc = connection->private;
830 * SVC requests need to follow a specific order (at least initially) and
831 * below code takes care of enforcing that. The expected order is:
834 * - Any other request, but the earlier two.
836 * Incoming requests are guaranteed to be serialized and so we don't
837 * need to protect 'state' for any races.
840 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
841 if (svc->state != GB_SVC_STATE_RESET)
844 case GB_SVC_TYPE_SVC_HELLO:
845 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
849 if (svc->state != GB_SVC_STATE_SVC_HELLO)
855 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
861 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
862 ret = gb_svc_version_request(op);
864 svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
866 case GB_SVC_TYPE_SVC_HELLO:
867 ret = gb_svc_hello(op);
869 svc->state = GB_SVC_STATE_SVC_HELLO;
871 case GB_SVC_TYPE_INTF_HOTPLUG:
872 return gb_svc_intf_hotplug_recv(op);
873 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
874 return gb_svc_intf_hot_unplug_recv(op);
875 case GB_SVC_TYPE_INTF_RESET:
876 return gb_svc_intf_reset_recv(op);
877 case GB_SVC_TYPE_KEY_EVENT:
878 return gb_svc_key_event_recv(op);
880 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
885 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
887 struct input_dev *input_dev;
889 input_dev = input_allocate_device();
891 return ERR_PTR(-ENOMEM);
893 input_dev->name = dev_name(&svc->dev);
894 svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
896 if (!svc->input_phys)
899 input_dev->phys = svc->input_phys;
900 input_dev->dev.parent = &svc->dev;
902 input_set_drvdata(input_dev, svc);
904 input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
909 input_free_device(svc->input);
910 return ERR_PTR(-ENOMEM);
913 static void gb_svc_release(struct device *dev)
915 struct gb_svc *svc = to_gb_svc(dev);
918 gb_connection_destroy(svc->connection);
919 ida_destroy(&svc->device_id_map);
920 destroy_workqueue(svc->wq);
921 kfree(svc->input_phys);
925 struct device_type greybus_svc_type = {
926 .name = "greybus_svc",
927 .release = gb_svc_release,
930 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
934 svc = kzalloc(sizeof(*svc), GFP_KERNEL);
938 svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
944 svc->dev.parent = &hd->dev;
945 svc->dev.bus = &greybus_bus_type;
946 svc->dev.type = &greybus_svc_type;
947 svc->dev.groups = svc_groups;
948 svc->dev.dma_mask = svc->dev.parent->dma_mask;
949 device_initialize(&svc->dev);
951 dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
953 ida_init(&svc->device_id_map);
954 svc->state = GB_SVC_STATE_RESET;
957 svc->input = gb_svc_input_create(svc);
958 if (IS_ERR(svc->input)) {
959 dev_err(&svc->dev, "failed to create input device: %ld\n",
960 PTR_ERR(svc->input));
964 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
965 gb_svc_request_handler);
966 if (IS_ERR(svc->connection)) {
967 dev_err(&svc->dev, "failed to create connection: %ld\n",
968 PTR_ERR(svc->connection));
972 svc->connection->private = svc;
977 input_free_device(svc->input);
979 put_device(&svc->dev);
983 int gb_svc_add(struct gb_svc *svc)
988 * The SVC protocol is currently driven by the SVC, so the SVC device
989 * is added from the connection request handler when enough
990 * information has been received.
992 ret = gb_connection_enable(svc->connection);
999 void gb_svc_del(struct gb_svc *svc)
1001 gb_connection_disable(svc->connection);
1004 * The SVC device and input device may have been registered
1005 * from the request handler.
1007 if (device_is_registered(&svc->dev)) {
1008 gb_svc_watchdog_destroy(svc);
1009 input_unregister_device(svc->input);
1010 device_del(&svc->dev);
1013 flush_workqueue(svc->wq);
1016 void gb_svc_put(struct gb_svc *svc)
1018 put_device(&svc->dev);