/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
10 #include <linux/input.h>
11 #include <linux/workqueue.h>
15 #define SVC_KEY_ARA_BUTTON KEY_A
17 #define SVC_INTF_EJECT_TIMEOUT 9000
19 struct gb_svc_deferred_request {
20 struct work_struct work;
21 struct gb_operation *operation;
25 static ssize_t endo_id_show(struct device *dev,
26 struct device_attribute *attr, char *buf)
28 struct gb_svc *svc = to_gb_svc(dev);
30 return sprintf(buf, "0x%04x\n", svc->endo_id);
32 static DEVICE_ATTR_RO(endo_id);
34 static ssize_t ap_intf_id_show(struct device *dev,
35 struct device_attribute *attr, char *buf)
37 struct gb_svc *svc = to_gb_svc(dev);
39 return sprintf(buf, "%u\n", svc->ap_intf_id);
41 static DEVICE_ATTR_RO(ap_intf_id);
45 // This is a hack, we need to do this "right" and clean the interface up
46 // properly, not just forcibly yank the thing out of the system and hope for the
47 // best. But for now, people want their modules to come out without having to
48 // throw the thing to the ground or get out a screwdriver.
49 static ssize_t intf_eject_store(struct device *dev,
50 struct device_attribute *attr, const char *buf,
53 struct gb_svc *svc = to_gb_svc(dev);
54 unsigned short intf_id;
57 ret = kstrtou16(buf, 10, &intf_id);
61 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
63 ret = gb_svc_intf_eject(svc, intf_id);
69 static DEVICE_ATTR_WO(intf_eject);
71 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
74 struct gb_svc *svc = to_gb_svc(dev);
76 return sprintf(buf, "%s\n",
77 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
80 static ssize_t watchdog_store(struct device *dev,
81 struct device_attribute *attr, const char *buf,
84 struct gb_svc *svc = to_gb_svc(dev);
88 retval = strtobool(buf, &user_request);
93 retval = gb_svc_watchdog_enable(svc);
95 retval = gb_svc_watchdog_disable(svc);
100 static DEVICE_ATTR_RW(watchdog);
102 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
103 u8 measurement_type, u32 *value)
105 struct gb_svc_pwrmon_intf_sample_get_request request;
106 struct gb_svc_pwrmon_intf_sample_get_response response;
109 request.intf_id = intf_id;
110 request.measurement_type = measurement_type;
112 ret = gb_operation_sync(svc->connection,
113 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
114 &request, sizeof(request),
115 &response, sizeof(response));
117 dev_err(&svc->dev, "failed to get intf sample (%d)\n", ret);
121 if (response.result) {
123 "UniPro error while getting intf power sample (%d %d): %d\n",
124 intf_id, measurement_type, response.result);
125 switch (response.result) {
126 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
128 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
135 *value = le32_to_cpu(response.measurement);
140 static struct attribute *svc_attrs[] = {
141 &dev_attr_endo_id.attr,
142 &dev_attr_ap_intf_id.attr,
143 &dev_attr_intf_eject.attr,
144 &dev_attr_watchdog.attr,
147 ATTRIBUTE_GROUPS(svc);
149 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
151 struct gb_svc_intf_device_id_request request;
153 request.intf_id = intf_id;
154 request.device_id = device_id;
156 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
157 &request, sizeof(request), NULL, 0);
160 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
162 struct gb_svc_intf_eject_request request;
165 request.intf_id = intf_id;
168 * The pulse width for module release in svc is long so we need to
169 * increase the timeout so the operation will not return to soon.
171 ret = gb_operation_sync_timeout(svc->connection,
172 GB_SVC_TYPE_INTF_EJECT, &request,
173 sizeof(request), NULL, 0,
174 SVC_INTF_EJECT_TIMEOUT);
176 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
183 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
186 struct gb_svc_dme_peer_get_request request;
187 struct gb_svc_dme_peer_get_response response;
191 request.intf_id = intf_id;
192 request.attr = cpu_to_le16(attr);
193 request.selector = cpu_to_le16(selector);
195 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
196 &request, sizeof(request),
197 &response, sizeof(response));
199 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
200 intf_id, attr, selector, ret);
204 result = le16_to_cpu(response.result_code);
206 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
207 intf_id, attr, selector, result);
212 *value = le32_to_cpu(response.attr_value);
216 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
218 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
221 struct gb_svc_dme_peer_set_request request;
222 struct gb_svc_dme_peer_set_response response;
226 request.intf_id = intf_id;
227 request.attr = cpu_to_le16(attr);
228 request.selector = cpu_to_le16(selector);
229 request.value = cpu_to_le32(value);
231 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
232 &request, sizeof(request),
233 &response, sizeof(response));
235 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
236 intf_id, attr, selector, value, ret);
240 result = le16_to_cpu(response.result_code);
242 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
243 intf_id, attr, selector, value, result);
249 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
251 int gb_svc_connection_create(struct gb_svc *svc,
252 u8 intf1_id, u16 cport1_id,
253 u8 intf2_id, u16 cport2_id,
256 struct gb_svc_conn_create_request request;
258 request.intf1_id = intf1_id;
259 request.cport1_id = cpu_to_le16(cport1_id);
260 request.intf2_id = intf2_id;
261 request.cport2_id = cpu_to_le16(cport2_id);
262 request.tc = 0; /* TC0 */
263 request.flags = cport_flags;
265 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
266 &request, sizeof(request), NULL, 0);
268 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
270 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
271 u8 intf2_id, u16 cport2_id)
273 struct gb_svc_conn_destroy_request request;
274 struct gb_connection *connection = svc->connection;
277 request.intf1_id = intf1_id;
278 request.cport1_id = cpu_to_le16(cport1_id);
279 request.intf2_id = intf2_id;
280 request.cport2_id = cpu_to_le16(cport2_id);
282 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
283 &request, sizeof(request), NULL, 0);
285 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
286 intf1_id, cport1_id, intf2_id, cport2_id, ret);
289 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
291 /* Creates bi-directional routes between the devices */
292 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
293 u8 intf2_id, u8 dev2_id)
295 struct gb_svc_route_create_request request;
297 request.intf1_id = intf1_id;
298 request.dev1_id = dev1_id;
299 request.intf2_id = intf2_id;
300 request.dev2_id = dev2_id;
302 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
303 &request, sizeof(request), NULL, 0);
306 /* Destroys bi-directional routes between the devices */
307 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
309 struct gb_svc_route_destroy_request request;
312 request.intf1_id = intf1_id;
313 request.intf2_id = intf2_id;
315 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
316 &request, sizeof(request), NULL, 0);
318 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
319 intf1_id, intf2_id, ret);
323 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
324 u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
325 u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
326 u8 flags, u32 quirks)
328 struct gb_svc_intf_set_pwrm_request request;
329 struct gb_svc_intf_set_pwrm_response response;
332 request.intf_id = intf_id;
333 request.hs_series = hs_series;
334 request.tx_mode = tx_mode;
335 request.tx_gear = tx_gear;
336 request.tx_nlanes = tx_nlanes;
337 request.rx_mode = rx_mode;
338 request.rx_gear = rx_gear;
339 request.rx_nlanes = rx_nlanes;
340 request.flags = flags;
341 request.quirks = cpu_to_le32(quirks);
343 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
344 &request, sizeof(request),
345 &response, sizeof(response));
349 return le16_to_cpu(response.result_code);
351 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
353 int gb_svc_ping(struct gb_svc *svc)
355 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
357 GB_OPERATION_TIMEOUT_DEFAULT * 2);
359 EXPORT_SYMBOL_GPL(gb_svc_ping);
361 static int gb_svc_version_request(struct gb_operation *op)
363 struct gb_connection *connection = op->connection;
364 struct gb_svc *svc = gb_connection_get_data(connection);
365 struct gb_protocol_version_request *request;
366 struct gb_protocol_version_response *response;
368 if (op->request->payload_size < sizeof(*request)) {
369 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
370 op->request->payload_size,
375 request = op->request->payload;
377 if (request->major > GB_SVC_VERSION_MAJOR) {
378 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
379 request->major, GB_SVC_VERSION_MAJOR);
383 svc->protocol_major = request->major;
384 svc->protocol_minor = request->minor;
386 if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
389 response = op->response->payload;
390 response->major = svc->protocol_major;
391 response->minor = svc->protocol_minor;
396 static int gb_svc_hello(struct gb_operation *op)
398 struct gb_connection *connection = op->connection;
399 struct gb_svc *svc = gb_connection_get_data(connection);
400 struct gb_svc_hello_request *hello_request;
403 if (op->request->payload_size < sizeof(*hello_request)) {
404 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
405 op->request->payload_size,
406 sizeof(*hello_request));
410 hello_request = op->request->payload;
411 svc->endo_id = le16_to_cpu(hello_request->endo_id);
412 svc->ap_intf_id = hello_request->interface_id;
414 ret = device_add(&svc->dev);
416 dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
420 ret = input_register_device(svc->input);
422 dev_err(&svc->dev, "failed to register input: %d\n", ret);
423 device_del(&svc->dev);
427 ret = gb_svc_watchdog_create(svc);
429 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
430 input_unregister_device(svc->input);
431 device_del(&svc->dev);
438 static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
440 intf->disconnected = true;
442 gb_interface_disable(intf);
443 gb_interface_deactivate(intf);
444 gb_interface_remove(intf);
447 static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
449 struct gb_svc_intf_hotplug_request *request;
450 struct gb_connection *connection = operation->connection;
451 struct gb_svc *svc = gb_connection_get_data(connection);
452 struct gb_host_device *hd = connection->hd;
453 struct gb_interface *intf;
459 /* The request message size has already been verified. */
460 request = operation->request->payload;
461 intf_id = request->intf_id;
463 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
465 intf = gb_interface_find(hd, intf_id);
467 /* HACK: Save Ara VID/PID for ES2 hack below */
468 vendor_id = intf->vendor_id;
469 product_id = intf->product_id;
472 * We have received a hotplug request for an interface that
475 * This can happen in cases like:
476 * - bootrom loading the firmware image and booting into that,
477 * which only generates a hotplug event. i.e. no hot-unplug
479 * - Or the firmware on the module crashed and sent hotplug
480 * request again to the SVC, which got propagated to AP.
482 * Remove the interface and add it again, and let user know
483 * about this with a print message.
485 dev_info(&svc->dev, "removing interface %u to add it again\n",
487 gb_svc_intf_remove(svc, intf);
490 intf = gb_interface_create(hd, intf_id);
492 dev_err(&svc->dev, "failed to create interface %u\n",
497 ret = gb_interface_activate(intf);
499 dev_err(&svc->dev, "failed to activate interface %u: %d\n",
501 goto err_interface_add;
505 * HACK: Use Ara VID/PID from earlier boot stage.
507 * FIXME: remove quirk with ES2 support
509 if (intf->quirks & GB_INTERFACE_QUIRK_NO_ARA_IDS) {
510 intf->vendor_id = vendor_id;
511 intf->product_id = product_id;
514 ret = gb_interface_enable(intf);
516 dev_err(&svc->dev, "failed to enable interface %u: %d\n",
518 goto err_interface_deactivate;
521 ret = gb_interface_add(intf);
523 gb_interface_disable(intf);
524 gb_interface_deactivate(intf);
530 err_interface_deactivate:
531 gb_interface_deactivate(intf);
533 gb_interface_add(intf);
536 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
538 struct gb_svc *svc = gb_connection_get_data(operation->connection);
539 struct gb_svc_intf_hot_unplug_request *request;
540 struct gb_host_device *hd = operation->connection->hd;
541 struct gb_interface *intf;
544 /* The request message size has already been verified. */
545 request = operation->request->payload;
546 intf_id = request->intf_id;
548 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
550 intf = gb_interface_find(hd, intf_id);
552 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
557 gb_svc_intf_remove(svc, intf);
560 static void gb_svc_process_deferred_request(struct work_struct *work)
562 struct gb_svc_deferred_request *dr;
563 struct gb_operation *operation;
567 dr = container_of(work, struct gb_svc_deferred_request, work);
568 operation = dr->operation;
569 svc = gb_connection_get_data(operation->connection);
570 type = operation->request->header->type;
573 case GB_SVC_TYPE_INTF_HOTPLUG:
574 gb_svc_process_intf_hotplug(operation);
576 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
577 gb_svc_process_intf_hot_unplug(operation);
580 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
583 gb_operation_put(operation);
587 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
589 struct gb_svc *svc = gb_connection_get_data(operation->connection);
590 struct gb_svc_deferred_request *dr;
592 dr = kmalloc(sizeof(*dr), GFP_KERNEL);
596 gb_operation_get(operation);
598 dr->operation = operation;
599 INIT_WORK(&dr->work, gb_svc_process_deferred_request);
601 queue_work(svc->wq, &dr->work);
607 * Bringing up a module can be time consuming, as that may require lots of
608 * initialization on the module side. Over that, we may also need to download
609 * the firmware first and flash that on the module.
611 * In order not to make other svc events wait for all this to finish,
612 * handle most of module hotplug stuff outside of the hotplug callback, with
613 * help of a workqueue.
615 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
617 struct gb_svc *svc = gb_connection_get_data(op->connection);
618 struct gb_svc_intf_hotplug_request *request;
620 if (op->request->payload_size < sizeof(*request)) {
621 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
622 op->request->payload_size, sizeof(*request));
626 request = op->request->payload;
628 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
630 return gb_svc_queue_deferred_request(op);
633 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
635 struct gb_svc *svc = gb_connection_get_data(op->connection);
636 struct gb_svc_intf_hot_unplug_request *request;
638 if (op->request->payload_size < sizeof(*request)) {
639 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
640 op->request->payload_size, sizeof(*request));
644 request = op->request->payload;
646 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
648 return gb_svc_queue_deferred_request(op);
651 static int gb_svc_intf_reset_recv(struct gb_operation *op)
653 struct gb_svc *svc = gb_connection_get_data(op->connection);
654 struct gb_message *request = op->request;
655 struct gb_svc_intf_reset_request *reset;
658 if (request->payload_size < sizeof(*reset)) {
659 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
660 request->payload_size, sizeof(*reset));
663 reset = request->payload;
665 intf_id = reset->intf_id;
667 /* FIXME Reset the interface here */
672 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
676 *code = SVC_KEY_ARA_BUTTON;
679 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
686 static int gb_svc_key_event_recv(struct gb_operation *op)
688 struct gb_svc *svc = gb_connection_get_data(op->connection);
689 struct gb_message *request = op->request;
690 struct gb_svc_key_event_request *key;
695 if (request->payload_size < sizeof(*key)) {
696 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
697 request->payload_size, sizeof(*key));
701 key = request->payload;
703 ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
707 event = key->key_event;
708 if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
709 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
713 input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
714 input_sync(svc->input);
719 static int gb_svc_request_handler(struct gb_operation *op)
721 struct gb_connection *connection = op->connection;
722 struct gb_svc *svc = gb_connection_get_data(connection);
727 * SVC requests need to follow a specific order (at least initially) and
728 * below code takes care of enforcing that. The expected order is:
731 * - Any other request, but the earlier two.
733 * Incoming requests are guaranteed to be serialized and so we don't
734 * need to protect 'state' for any races.
737 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
738 if (svc->state != GB_SVC_STATE_RESET)
741 case GB_SVC_TYPE_SVC_HELLO:
742 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
746 if (svc->state != GB_SVC_STATE_SVC_HELLO)
752 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
758 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
759 ret = gb_svc_version_request(op);
761 svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
763 case GB_SVC_TYPE_SVC_HELLO:
764 ret = gb_svc_hello(op);
766 svc->state = GB_SVC_STATE_SVC_HELLO;
768 case GB_SVC_TYPE_INTF_HOTPLUG:
769 return gb_svc_intf_hotplug_recv(op);
770 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
771 return gb_svc_intf_hot_unplug_recv(op);
772 case GB_SVC_TYPE_INTF_RESET:
773 return gb_svc_intf_reset_recv(op);
774 case GB_SVC_TYPE_KEY_EVENT:
775 return gb_svc_key_event_recv(op);
777 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
782 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
784 struct input_dev *input_dev;
786 input_dev = input_allocate_device();
788 return ERR_PTR(-ENOMEM);
790 input_dev->name = dev_name(&svc->dev);
791 svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
793 if (!svc->input_phys)
796 input_dev->phys = svc->input_phys;
797 input_dev->dev.parent = &svc->dev;
799 input_set_drvdata(input_dev, svc);
801 input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
806 input_free_device(svc->input);
807 return ERR_PTR(-ENOMEM);
810 static void gb_svc_release(struct device *dev)
812 struct gb_svc *svc = to_gb_svc(dev);
815 gb_connection_destroy(svc->connection);
816 ida_destroy(&svc->device_id_map);
817 destroy_workqueue(svc->wq);
818 kfree(svc->input_phys);
822 struct device_type greybus_svc_type = {
823 .name = "greybus_svc",
824 .release = gb_svc_release,
827 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
831 svc = kzalloc(sizeof(*svc), GFP_KERNEL);
835 svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
841 svc->dev.parent = &hd->dev;
842 svc->dev.bus = &greybus_bus_type;
843 svc->dev.type = &greybus_svc_type;
844 svc->dev.groups = svc_groups;
845 svc->dev.dma_mask = svc->dev.parent->dma_mask;
846 device_initialize(&svc->dev);
848 dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
850 ida_init(&svc->device_id_map);
851 svc->state = GB_SVC_STATE_RESET;
854 svc->input = gb_svc_input_create(svc);
855 if (IS_ERR(svc->input)) {
856 dev_err(&svc->dev, "failed to create input device: %ld\n",
857 PTR_ERR(svc->input));
861 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
862 gb_svc_request_handler);
863 if (IS_ERR(svc->connection)) {
864 dev_err(&svc->dev, "failed to create connection: %ld\n",
865 PTR_ERR(svc->connection));
869 gb_connection_set_data(svc->connection, svc);
874 input_free_device(svc->input);
876 put_device(&svc->dev);
880 int gb_svc_add(struct gb_svc *svc)
885 * The SVC protocol is currently driven by the SVC, so the SVC device
886 * is added from the connection request handler when enough
887 * information has been received.
889 ret = gb_connection_enable(svc->connection);
896 static void gb_svc_remove_interfaces(struct gb_svc *svc)
898 struct gb_interface *intf, *tmp;
900 list_for_each_entry_safe(intf, tmp, &svc->hd->interfaces, links) {
901 gb_interface_disable(intf);
902 gb_interface_remove(intf);
906 void gb_svc_del(struct gb_svc *svc)
908 gb_connection_disable(svc->connection);
911 * The SVC device and input device may have been registered
912 * from the request handler.
914 if (device_is_registered(&svc->dev)) {
915 gb_svc_watchdog_destroy(svc);
916 input_unregister_device(svc->input);
917 device_del(&svc->dev);
920 flush_workqueue(svc->wq);
922 gb_svc_remove_interfaces(svc);
925 void gb_svc_put(struct gb_svc *svc)
927 put_device(&svc->dev);