greybus: svc: flush workqueue at connection exit
drivers/staging/greybus/svc.c
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

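/*
 * CPort flags passed in the SVC connection-create request; they select which
 * UniPro CPort features (e.g. end-to-end flow control) are enabled for the
 * new connection.
 */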
#define CPORT_FLAGS_E2EFC       BIT(0)
#define CPORT_FLAGS_CSD_N       BIT(1)
#define CPORT_FLAGS_CSV_N       BIT(2)


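/*
 * A deferred request wraps an incoming SVC operation so that it can be
 * processed later from the svc workqueue instead of from the request handler
 * itself.
 */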
struct gb_svc_deferred_request {
        struct work_struct work;
        struct gb_operation *operation;
};


static ssize_t endo_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

static struct attribute *svc_attrs[] = {
        &dev_attr_endo_id.attr,
        &dev_attr_ap_intf_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(svc);

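/* Tell the SVC which device id has been assigned to an interface. */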
static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
        struct gb_svc_intf_device_id_request request;

        request.intf_id = intf_id;
        request.device_id = device_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
                                 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
{
        struct gb_svc_intf_reset_request request;

        request.intf_id = intf_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_reset);

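/*
 * Read a DME attribute of the peer at the given interface through the SVC.
 * The UniPro result code returned by the peer is checked in addition to the
 * Greybus operation status.
 */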
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 *value)
{
        struct gb_svc_dme_peer_get_request request;
        struct gb_svc_dme_peer_get_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to get DME attribute (%hhu %hx %hu): %d\n",
                                intf_id, attr, selector, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while getting DME attribute (%hhu %hx %hu): %hu\n",
                                intf_id, attr, selector, result);
                return -EINVAL;
        }

        if (value)
                *value = le32_to_cpu(response.attr_value);

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 value)
{
        struct gb_svc_dme_peer_set_request request;
        struct gb_svc_dme_peer_set_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);
        request.value = cpu_to_le32(value);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to set DME attribute (%hhu %hx %hu %u): %d\n",
                                intf_id, attr, selector, value, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while setting DME attribute (%hhu %hx %hu %u): %hu\n",
                                intf_id, attr, selector, value, result);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * boot status attribute. The AP needs to clear it after reading a non-zero
 * value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
        struct gb_host_device *hd = intf->hd;
        int ret;
        u32 value;

        /* Read and clear boot status in T_TstSrcIncrement */
        ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id,
                                  DME_ATTR_T_TST_SRC_INCREMENT,
                                  DME_ATTR_SELECTOR_INDEX, &value);

        if (ret)
                return ret;

        /*
         * A nonzero boot status indicates the module has finished
         * booting. Clear it.
         */
        if (!value) {
                dev_err(&intf->dev, "Module not ready yet\n");
                return -ENODEV;
        }

        /*
         * Check if the module needs to boot from unipro.
         * For ES2: We need to check lowest 8 bits of 'value'.
         * For ES3: We need to check highest 8 bits out of 32 of 'value'.
         *
         * FIXME: Add code to find if we are on ES2 or ES3 to have separate
         * checks.
         */
        if (value == DME_TSI_UNIPRO_BOOT_STARTED ||
            value == DME_TSI_FALLBACK_UNIPRO_BOOT_STARTED)
                intf->boot_over_unipro = true;

        return gb_svc_dme_peer_set(hd->svc, intf->interface_id,
                                   DME_ATTR_T_TST_SRC_INCREMENT,
                                   DME_ATTR_SELECTOR_INDEX, 0);
}

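/*
 * Ask the SVC to create a connection between two (interface, cport) pairs.
 */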
int gb_svc_connection_create(struct gb_svc *svc,
                                u8 intf1_id, u16 cport1_id,
                                u8 intf2_id, u16 cport2_id,
                                bool boot_over_unipro)
{
        struct gb_svc_conn_create_request request;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);
        /*
         * XXX: fix connection parameters to TC0 and all CPort flags
         * for now.
         */
        request.tc = 0;

        /*
         * We need to skip setting E2EFC and other flags in the connection
         * create request, for all cports on an interface that needs to boot
         * over unipro, i.e. interfaces required to download firmware.
         */
        if (boot_over_unipro)
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
        else
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
                               u8 intf2_id, u16 cport2_id)
{
        struct gb_svc_conn_destroy_request request;
        struct gb_connection *connection = svc->connection;
        int ret;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);

        ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy connection (%hhu:%hu %hhu:%hu): %d\n",
                                intf1_id, cport1_id, intf2_id, cport2_id, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);

/* Creates bi-directional routes between the devices */
static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
                               u8 intf2_id, u8 dev2_id)
{
        struct gb_svc_route_create_request request;

        request.intf1_id = intf1_id;
        request.dev1_id = dev1_id;
        request.intf2_id = intf2_id;
        request.dev2_id = dev2_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
                                 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
        struct gb_svc_route_destroy_request request;
        int ret;

        request.intf1_id = intf1_id;
        request.intf2_id = intf2_id;

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy route (%hhu %hhu): %d\n",
                                intf1_id, intf2_id, ret);
        }
}

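/*
 * Handle the SVC protocol-version request: validate the requested major
 * version and report back the version the AP will be using.
 */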
static int gb_svc_version_request(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_protocol_version_request *request;
        struct gb_protocol_version_response *response;

        if (op->request->payload_size < sizeof(*request)) {
                dev_err(&svc->dev, "short version request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        if (request->major > GB_SVC_VERSION_MAJOR) {
                dev_warn(&svc->dev, "unsupported major version (%hhu > %hhu)\n",
                                request->major, GB_SVC_VERSION_MAJOR);
                return -ENOTSUPP;
        }

        connection->module_major = request->major;
        connection->module_minor = request->minor;

        if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
                return -ENOMEM;

        response = op->response->payload;
        response->major = connection->module_major;
        response->minor = connection->module_minor;

        return 0;
}

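/*
 * The hello request carries the endo id and the AP interface id assigned by
 * the SVC; the svc device is registered once this information is available.
 */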
static int gb_svc_hello(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_svc_hello_request *hello_request;
        int ret;

        if (op->request->payload_size < sizeof(*hello_request)) {
                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*hello_request));
                return -EINVAL;
        }

        hello_request = op->request->payload;
        svc->endo_id = le16_to_cpu(hello_request->endo_id);
        svc->ap_intf_id = hello_request->interface_id;

        ret = device_add(&svc->dev);
        if (ret) {
                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
                return ret;
        }

        return 0;
}

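/*
 * Tear down an interface: remove the Greybus interface, destroy its route to
 * the AP and release its device id.
 */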
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
        u8 intf_id = intf->interface_id;
        u8 device_id;

        device_id = intf->device_id;
        gb_interface_remove(intf);

        /*
         * Destroy the two-way route between the AP and the interface.
         */
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

        ida_simple_remove(&svc->device_id_map, device_id);
}

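/*
 * Handle an interface hotplug request. This runs from the svc workqueue (see
 * gb_svc_queue_deferred_request()), as bringing up an interface can take a
 * long time.
 */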
static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
{
        struct gb_svc_intf_hotplug_request *request;
        struct gb_connection *connection = operation->connection;
        struct gb_svc *svc = connection->private;
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 intf_id, device_id;
        int ret;

        /* The request message size has already been verified. */
        request = operation->request->payload;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (intf) {
                /*
                 * We have received a hotplug request for an interface that
                 * already exists.
                 *
                 * This can happen in cases like:
                 * - bootrom loading the firmware image and booting into that,
                 *   which only generates a hotplug event, i.e. no hot-unplug
                 *   event.
                 * - Or the firmware on the module crashed and sent the hotplug
                 *   request again to the SVC, which got propagated to the AP.
                 *
                 * Remove the interface and add it again, and let the user know
                 * about this with a print message.
                 */
                dev_info(&svc->dev, "removing interface %hhu to add it again\n",
                                intf_id);
                gb_svc_intf_remove(svc, intf);
        }

        intf = gb_interface_create(hd, intf_id);
        if (!intf) {
                dev_err(&svc->dev, "failed to create interface %hhu\n",
                                intf_id);
                return;
        }

        ret = gb_svc_read_and_clear_module_boot_status(intf);
        if (ret)
                goto destroy_interface;

        intf->unipro_mfg_id = le32_to_cpu(request->data.unipro_mfg_id);
        intf->unipro_prod_id = le32_to_cpu(request->data.unipro_prod_id);
        intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
        intf->product_id = le32_to_cpu(request->data.ara_prod_id);

        /*
         * Create a device id for the interface:
         * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
         * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
         *
         * XXX Do we need to allocate device ID for SVC or the AP here? And what
         * XXX about an AP with multiple interface blocks?
         */
        ret = ida_simple_get(&svc->device_id_map,
                             GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
        if (ret < 0) {
                dev_err(&svc->dev, "failed to allocate device id for interface %hhu: %d\n",
                                intf_id, ret);
                goto destroy_interface;
        }
        device_id = ret;

        ret = gb_svc_intf_device_id(svc, intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to set device id %hhu for interface %hhu: %d\n",
                                device_id, intf_id, ret);
                goto ida_put;
        }

        /*
         * Create a two-way route between the AP and the new interface
         */
        ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
                                  intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to create route to interface %hhu (device id %hhu): %d\n",
                                intf_id, device_id, ret);
                goto svc_id_free;
        }

        ret = gb_interface_init(intf, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to initialize interface %hhu (device id %hhu): %d\n",
                                intf_id, device_id, ret);
                goto destroy_route;
        }

        return;

destroy_route:
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
svc_id_free:
        /*
         * XXX Should we tell the SVC that this id doesn't belong to the
         * XXX interface anymore?
         */
ida_put:
        ida_simple_remove(&svc->device_id_map, device_id);
destroy_interface:
        gb_interface_remove(intf);
}

static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
{
        struct gb_svc *svc = operation->connection->private;
        struct gb_svc_intf_hot_unplug_request *request;
        struct gb_host_device *hd = operation->connection->hd;
        struct gb_interface *intf;
        u8 intf_id;

        /* The request message size has already been verified. */
        request = operation->request->payload;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (!intf) {
                dev_warn(&svc->dev, "could not find hot-unplug interface %hhu\n",
                                intf_id);
                return;
        }

        gb_svc_intf_remove(svc, intf);
}

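/*
 * Work function for deferred SVC requests: dispatch the saved operation to
 * the right handler and drop the reference taken when it was queued.
 */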
static void gb_svc_process_deferred_request(struct work_struct *work)
{
        struct gb_svc_deferred_request *dr;
        struct gb_operation *operation;
        struct gb_svc *svc;
        u8 type;

        dr = container_of(work, struct gb_svc_deferred_request, work);
        operation = dr->operation;
        svc = operation->connection->private;
        type = operation->request->header->type;

        switch (type) {
        case GB_SVC_TYPE_INTF_HOTPLUG:
                gb_svc_process_intf_hotplug(operation);
                break;
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                gb_svc_process_intf_hot_unplug(operation);
                break;
        default:
                dev_err(&svc->dev, "bad deferred request type: %02x\n", type);
        }

        gb_operation_put(operation);
        kfree(dr);
}

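/*
 * Queue an incoming operation for deferred processing. A reference to the
 * operation is held until the work item has run.
 */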
static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
        struct gb_svc *svc = operation->connection->private;
        struct gb_svc_deferred_request *dr;

        dr = kmalloc(sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;

        gb_operation_get(operation);

        dr->operation = operation;
        INIT_WORK(&dr->work, gb_svc_process_deferred_request);

        queue_work(svc->wq, &dr->work);

        return 0;
}

/*
 * Bringing up a module can be time consuming, as that may require lots of
 * initialization on the module side. On top of that, we may also need to
 * download the firmware first and flash it on the module.
 *
 * In order not to make other SVC events wait for all this to finish, most of
 * the module hotplug handling is done outside of the hotplug callback, with
 * the help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hotplug_request *request;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

        return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hot_unplug_request *request;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

        return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_message *request = op->request;
        struct gb_svc_intf_reset_request *reset;
        u8 intf_id;

        if (request->payload_size < sizeof(*reset)) {
                dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
                                request->payload_size, sizeof(*reset));
                return -EINVAL;
        }
        reset = request->payload;

        intf_id = reset->intf_id;

        /* FIXME Reset the interface here */

        return 0;
}

static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        int ret = 0;

        /*
         * SVC requests need to follow a specific order (at least initially),
         * and the code below enforces that. The expected order is:
         * - PROTOCOL_VERSION
         * - SVC_HELLO
         * - Any other request, but only after the two above.
         *
         * Incoming requests are guaranteed to be serialized, so we don't
         * need to protect 'state' against any races.
         */
        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                if (svc->state != GB_SVC_STATE_RESET)
                        ret = -EINVAL;
                break;
        case GB_SVC_TYPE_SVC_HELLO:
                if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
                        ret = -EINVAL;
                break;
        default:
                if (svc->state != GB_SVC_STATE_SVC_HELLO)
                        ret = -EINVAL;
                break;
        }

        if (ret) {
                dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
                                type, svc->state);
                return ret;
        }

        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                ret = gb_svc_version_request(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
                return ret;
        case GB_SVC_TYPE_SVC_HELLO:
                ret = gb_svc_hello(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_SVC_HELLO;
                return ret;
        case GB_SVC_TYPE_INTF_HOTPLUG:
                return gb_svc_intf_hotplug_recv(op);
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                return gb_svc_intf_hot_unplug_recv(op);
        case GB_SVC_TYPE_INTF_RESET:
                return gb_svc_intf_reset_recv(op);
        default:
                dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
                return -EINVAL;
        }
}

static void gb_svc_release(struct device *dev)
{
        struct gb_svc *svc = to_gb_svc(dev);

        ida_destroy(&svc->device_id_map);
        destroy_workqueue(svc->wq);
        kfree(svc);
}

struct device_type greybus_svc_type = {
        .name           = "greybus_svc",
        .release        = gb_svc_release,
};

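/*
 * Set up the svc device and its state. The workqueue is an unbound queue
 * with max_active set to one, so deferred hotplug requests are processed one
 * at a time.
 */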
static int gb_svc_connection_init(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_svc *svc;

        svc = kzalloc(sizeof(*svc), GFP_KERNEL);
        if (!svc)
                return -ENOMEM;

        svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
        if (!svc->wq) {
                kfree(svc);
                return -ENOMEM;
        }

        svc->dev.parent = &hd->dev;
        svc->dev.bus = &greybus_bus_type;
        svc->dev.type = &greybus_svc_type;
        svc->dev.groups = svc_groups;
        svc->dev.dma_mask = svc->dev.parent->dma_mask;
        device_initialize(&svc->dev);

        dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

        ida_init(&svc->device_id_map);
        svc->state = GB_SVC_STATE_RESET;
        svc->connection = connection;
        connection->private = svc;

        hd->svc = svc;

        return 0;
}

static void gb_svc_connection_exit(struct gb_connection *connection)
{
        struct gb_svc *svc = connection->private;

        if (device_is_registered(&svc->dev))
                device_del(&svc->dev);

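        /*
         * Flush the workqueue so that any deferred requests still referencing
         * this connection have completed before it is torn down.
         */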
        flush_workqueue(svc->wq);

        connection->hd->svc = NULL;
        connection->private = NULL;

        put_device(&svc->dev);
}

static struct gb_protocol svc_protocol = {
        .name                   = "svc",
        .id                     = GREYBUS_PROTOCOL_SVC,
        .major                  = GB_SVC_VERSION_MAJOR,
        .minor                  = GB_SVC_VERSION_MINOR,
        .connection_init        = gb_svc_connection_init,
        .connection_exit        = gb_svc_connection_exit,
        .request_recv           = gb_svc_request_recv,
        .flags                  = GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
                                  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
                                  GB_PROTOCOL_SKIP_VERSION,
};
gb_builtin_protocol_driver(svc_protocol);