greybus: svc: add interface eject operation
drivers/staging/greybus/svc.c
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

#define CPORT_FLAGS_E2EFC       BIT(0)
#define CPORT_FLAGS_CSD_N       BIT(1)
#define CPORT_FLAGS_CSV_N       BIT(2)


struct gb_svc_deferred_request {
        struct work_struct work;
        struct gb_operation *operation;
};


static ssize_t endo_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

static struct attribute *svc_attrs[] = {
        &dev_attr_endo_id.attr,
        &dev_attr_ap_intf_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(svc);

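/*
 * Ask the SVC to assign @device_id to the interface with id @intf_id by
 * sending an Interface Device ID request.
 */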
static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
        struct gb_svc_intf_device_id_request request;

        request.intf_id = intf_id;
        request.device_id = device_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
                                 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
{
        struct gb_svc_intf_reset_request request;

        request.intf_id = intf_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_reset);

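/*
 * Ask the SVC to eject the module carrying interface @intf_id. An extended
 * timeout (GB_SVC_EJECT_TIME) is used, as the ejection pulse driven by the
 * SVC takes longer than a normal operation.
 */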
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
        struct gb_svc_intf_eject_request request;

        request.intf_id = intf_id;

        /*
         * The pulse width for module release in the SVC is long, so we need
         * to increase the timeout so that the operation does not return too
         * soon.
         */
        return gb_operation_sync_timeout(svc->connection,
                                         GB_SVC_TYPE_INTF_EJECT, &request,
                                         sizeof(request), NULL, 0,
                                         GB_SVC_EJECT_TIME);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_eject);

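/*
 * For illustration only: a caller outside this file, for example a
 * hypothetical module "eject" attribute handler, could use the helper
 * roughly as follows (all names except gb_svc_intf_eject() are made up):
 *
 *      static int example_eject_module(struct gb_interface *intf)
 *      {
 *              int ret;
 *
 *              ret = gb_svc_intf_eject(intf->hd->svc, intf->interface_id);
 *              if (ret)
 *                      dev_err(&intf->dev, "failed to eject module: %d\n", ret);
 *
 *              return ret;
 *      }
 */
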
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 *value)
{
        struct gb_svc_dme_peer_get_request request;
        struct gb_svc_dme_peer_get_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
                                intf_id, attr, selector, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
                                intf_id, attr, selector, result);
                return -EIO;
        }

        if (value)
                *value = le32_to_cpu(response.attr_value);

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 value)
{
        struct gb_svc_dme_peer_set_request request;
        struct gb_svc_dme_peer_set_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);
        request.value = cpu_to_le32(value);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
                                intf_id, attr, selector, value, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
                                intf_id, attr, selector, value, result);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * boot status attribute ES3_INIT_STATUS. The AP needs to clear it after
 * reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
        struct gb_host_device *hd = intf->hd;
        int ret;
        u32 value;
        u16 attr;
        u8 init_status;

        /*
         * Check whether the module is ES2 or ES3, and choose the attribute
         * number accordingly.
         * FIXME: Remove ES2 support from the kernel entirely.
         */
        if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
                                intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
                attr = DME_ATTR_T_TST_SRC_INCREMENT;
        else
                attr = DME_ATTR_ES3_INIT_STATUS;

        /* Read and clear boot status in ES3_INIT_STATUS */
        ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
                                  DME_ATTR_SELECTOR_INDEX, &value);

        if (ret)
                return ret;

        /*
         * A non-zero boot status indicates the module has finished booting.
         * Clear it.
         */
        if (!value) {
                dev_err(&intf->dev, "Module not ready yet\n");
                return -ENODEV;
        }

        /*
         * Check whether the module needs to boot over UniPro.
         * For ES2: check the lowest 8 bits of 'value'.
         * For ES3: check the highest 8 bits of the 32-bit 'value'.
         * FIXME: Remove ES2 support from the kernel entirely.
         */
        if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
                                intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
                init_status = value;
        else
                init_status = value >> 24;

        if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
                                init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
                intf->boot_over_unipro = true;

        return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
                                   DME_ATTR_SELECTOR_INDEX, 0);
}

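/*
 * Ask the SVC to create a connection between CPort @cport1_id on interface
 * @intf1_id and CPort @cport2_id on interface @intf2_id. The CPort flags
 * depend on @boot_over_unipro: E2EFC is not requested for interfaces that
 * still need to boot over UniPro (i.e. download firmware first).
 */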
int gb_svc_connection_create(struct gb_svc *svc,
                                u8 intf1_id, u16 cport1_id,
                                u8 intf2_id, u16 cport2_id,
                                bool boot_over_unipro)
{
        struct gb_svc_conn_create_request request;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);
        /*
         * XXX: fix connection parameters to TC0 and all CPort flags
         * for now.
         */
        request.tc = 0;

        /*
         * We need to skip setting E2EFC and other flags in the connection
         * create request for all CPorts on an interface that needs to boot
         * over UniPro, i.e. an interface required to download firmware.
         */
        if (boot_over_unipro)
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
        else
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
                               u8 intf2_id, u16 cport2_id)
{
        struct gb_svc_conn_destroy_request request;
        struct gb_connection *connection = svc->connection;
        int ret;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);

        ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
                                intf1_id, cport1_id, intf2_id, cport2_id, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);

/* Creates bi-directional routes between the devices */
static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
                               u8 intf2_id, u8 dev2_id)
{
        struct gb_svc_route_create_request request;

        request.intf1_id = intf1_id;
        request.dev1_id = dev1_id;
        request.intf2_id = intf2_id;
        request.dev2_id = dev2_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
                                 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
        struct gb_svc_route_destroy_request request;
        int ret;

        request.intf1_id = intf1_id;
        request.intf2_id = intf2_id;

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
                                intf1_id, intf2_id, ret);
        }
}

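/*
 * Ask the SVC to change the UniPro power mode of interface @intf_id. Note
 * that on success this returns the result code reported by the SVC rather
 * than zero, so callers have to check for both a negative errno and a
 * non-zero result code.
 */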
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
                               u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
                               u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
                               u8 flags, u32 quirks)
{
        struct gb_svc_intf_set_pwrm_request request;
        struct gb_svc_intf_set_pwrm_response response;
        int ret;

        request.intf_id = intf_id;
        request.hs_series = hs_series;
        request.tx_mode = tx_mode;
        request.tx_gear = tx_gear;
        request.tx_nlanes = tx_nlanes;
        request.rx_mode = rx_mode;
        request.rx_gear = rx_gear;
        request.rx_nlanes = rx_nlanes;
        request.flags = flags;
        request.quirks = cpu_to_le32(quirks);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret < 0)
                return ret;

        return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

static int gb_svc_version_request(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_protocol_version_request *request;
        struct gb_protocol_version_response *response;

        if (op->request->payload_size < sizeof(*request)) {
                dev_err(&svc->dev, "short version request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        if (request->major > GB_SVC_VERSION_MAJOR) {
                dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
                                request->major, GB_SVC_VERSION_MAJOR);
                return -ENOTSUPP;
        }

        connection->module_major = request->major;
        connection->module_minor = request->minor;

        if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
                return -ENOMEM;

        response = op->response->payload;
        response->major = connection->module_major;
        response->minor = connection->module_minor;

        return 0;
}

static int gb_svc_hello(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_svc_hello_request *hello_request;
        int ret;

        if (op->request->payload_size < sizeof(*hello_request)) {
                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*hello_request));
                return -EINVAL;
        }

        hello_request = op->request->payload;
        svc->endo_id = le16_to_cpu(hello_request->endo_id);
        svc->ap_intf_id = hello_request->interface_id;

        ret = device_add(&svc->dev);
        if (ret) {
                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
                return ret;
        }

        return 0;
}

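/*
 * Tear down everything that was set up for an interface at hotplug time:
 * mark it disconnected, remove it, destroy its route to the AP and release
 * its device id.
 */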
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
        u8 intf_id = intf->interface_id;
        u8 device_id = intf->device_id;

        intf->disconnected = true;

        gb_interface_remove(intf);

        /*
         * Destroy the two-way route between the AP and the interface.
         */
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

        ida_simple_remove(&svc->device_id_map, device_id);
}

static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
{
        struct gb_svc_intf_hotplug_request *request;
        struct gb_connection *connection = operation->connection;
        struct gb_svc *svc = connection->private;
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 intf_id, device_id;
        int ret;

        /* The request message size has already been verified. */
        request = operation->request->payload;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (intf) {
                /*
                 * We have received a hotplug request for an interface that
                 * already exists.
                 *
                 * This can happen, for instance, when:
                 * - the bootrom loads the firmware image and boots into it,
                 *   which only generates a hotplug event, i.e. no hot-unplug
                 *   event; or
                 * - the firmware on the module crashed and sent the hotplug
                 *   request again to the SVC, which propagated it to the AP.
                 *
                 * Remove the interface and add it again, and let the user
                 * know about this with a log message.
                 */
                dev_info(&svc->dev, "removing interface %u to add it again\n",
                                intf_id);
                gb_svc_intf_remove(svc, intf);
        }

        intf = gb_interface_create(hd, intf_id);
        if (!intf) {
                dev_err(&svc->dev, "failed to create interface %u\n",
                                intf_id);
                return;
        }

        intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
        intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
        intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
        intf->product_id = le32_to_cpu(request->data.ara_prod_id);
        intf->serial_number = le64_to_cpu(request->data.serial_number);

        ret = gb_svc_read_and_clear_module_boot_status(intf);
        if (ret) {
                dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
                                intf_id, ret);
                goto destroy_interface;
        }

        /*
         * Create a device id for the interface:
         * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
         * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
         *
         * XXX Do we need to allocate device ID for SVC or the AP here? And what
         * XXX about an AP with multiple interface blocks?
         */
        ret = ida_simple_get(&svc->device_id_map,
                             GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
        if (ret < 0) {
                dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
                                intf_id, ret);
                goto destroy_interface;
        }
        device_id = ret;

        ret = gb_svc_intf_device_id(svc, intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
                                device_id, intf_id, ret);
                goto ida_put;
        }

        /*
         * Create a two-way route between the AP and the new interface.
         */
        ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
                                  intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
                                intf_id, device_id, ret);
                goto svc_id_free;
        }

        ret = gb_interface_init(intf, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to initialize interface %u (device id %u): %d\n",
                                intf_id, device_id, ret);
                goto destroy_route;
        }

        return;

destroy_route:
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
svc_id_free:
        /*
         * XXX Should we tell the SVC that this id doesn't belong to the
         * XXX interface anymore?
         */
ida_put:
        ida_simple_remove(&svc->device_id_map, device_id);
destroy_interface:
        gb_interface_remove(intf);
}

static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
{
        struct gb_svc *svc = operation->connection->private;
        struct gb_svc_intf_hot_unplug_request *request;
        struct gb_host_device *hd = operation->connection->hd;
        struct gb_interface *intf;
        u8 intf_id;

        /* The request message size has already been verified. */
        request = operation->request->payload;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (!intf) {
                dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
                                intf_id);
                return;
        }

        gb_svc_intf_remove(svc, intf);
}

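/*
 * Worker for deferred SVC requests: dispatch the queued operation to the
 * hotplug or hot-unplug handler above, then drop the operation reference
 * taken by gb_svc_queue_deferred_request() below.
 */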
static void gb_svc_process_deferred_request(struct work_struct *work)
{
        struct gb_svc_deferred_request *dr;
        struct gb_operation *operation;
        struct gb_svc *svc;
        u8 type;

        dr = container_of(work, struct gb_svc_deferred_request, work);
        operation = dr->operation;
        svc = operation->connection->private;
        type = operation->request->header->type;

        switch (type) {
        case GB_SVC_TYPE_INTF_HOTPLUG:
                gb_svc_process_intf_hotplug(operation);
                break;
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                gb_svc_process_intf_hot_unplug(operation);
                break;
        default:
                dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
        }

        gb_operation_put(operation);
        kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
        struct gb_svc *svc = operation->connection->private;
        struct gb_svc_deferred_request *dr;

        dr = kmalloc(sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;

        gb_operation_get(operation);

        dr->operation = operation;
        INIT_WORK(&dr->work, gb_svc_process_deferred_request);

        queue_work(svc->wq, &dr->work);

        return 0;
}

/*
 * Bringing up a module can be time consuming, as it may require a lot of
 * initialization on the module side. On top of that, we may also need to
 * download firmware first and flash it onto the module.
 *
 * In order not to make other SVC events wait for all of this to finish,
 * handle most of the module hotplug work outside of the hotplug callback,
 * with the help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hotplug_request *request;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

        return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hot_unplug_request *request;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

        return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_message *request = op->request;
        struct gb_svc_intf_reset_request *reset;
        u8 intf_id;

        if (request->payload_size < sizeof(*reset)) {
                dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
                                request->payload_size, sizeof(*reset));
                return -EINVAL;
        }
        reset = request->payload;

        intf_id = reset->intf_id;

        /* FIXME Reset the interface here */

        return 0;
}

static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        int ret = 0;

        /*
         * SVC requests need to follow a specific order (at least initially),
         * and the code below enforces that. The expected order is:
         * - PROTOCOL_VERSION
         * - SVC_HELLO
         * - Any other request, except the two above.
         *
         * Incoming requests are guaranteed to be serialized, so we don't
         * need to protect 'state' against races.
         */
        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                if (svc->state != GB_SVC_STATE_RESET)
                        ret = -EINVAL;
                break;
        case GB_SVC_TYPE_SVC_HELLO:
                if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
                        ret = -EINVAL;
                break;
        default:
                if (svc->state != GB_SVC_STATE_SVC_HELLO)
                        ret = -EINVAL;
                break;
        }

        if (ret) {
                dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
                                type, svc->state);
                return ret;
        }

        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                ret = gb_svc_version_request(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
                return ret;
        case GB_SVC_TYPE_SVC_HELLO:
                ret = gb_svc_hello(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_SVC_HELLO;
                return ret;
        case GB_SVC_TYPE_INTF_HOTPLUG:
                return gb_svc_intf_hotplug_recv(op);
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                return gb_svc_intf_hot_unplug_recv(op);
        case GB_SVC_TYPE_INTF_RESET:
                return gb_svc_intf_reset_recv(op);
        default:
                dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
                return -EINVAL;
        }
}

static void gb_svc_release(struct device *dev)
{
        struct gb_svc *svc = to_gb_svc(dev);

        if (svc->connection)
                gb_connection_destroy(svc->connection);
        ida_destroy(&svc->device_id_map);
        destroy_workqueue(svc->wq);
        kfree(svc);
}

struct device_type greybus_svc_type = {
        .name           = "greybus_svc",
        .release        = gb_svc_release,
};

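/*
 * Allocate and initialize an SVC structure for a host device, including its
 * workqueue and the static SVC connection; returns NULL on failure. The SVC
 * device itself is only registered later, from the hello request handler,
 * and the caller drops its reference with gb_svc_put().
 */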
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
        struct gb_svc *svc;

        svc = kzalloc(sizeof(*svc), GFP_KERNEL);
        if (!svc)
                return NULL;

        svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
        if (!svc->wq) {
                kfree(svc);
                return NULL;
        }

        svc->dev.parent = &hd->dev;
        svc->dev.bus = &greybus_bus_type;
        svc->dev.type = &greybus_svc_type;
        svc->dev.groups = svc_groups;
        svc->dev.dma_mask = svc->dev.parent->dma_mask;
        device_initialize(&svc->dev);

        dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

        ida_init(&svc->device_id_map);
        svc->state = GB_SVC_STATE_RESET;
        svc->hd = hd;

        svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
                                                        GREYBUS_PROTOCOL_SVC);
        if (!svc->connection) {
                dev_err(&svc->dev, "failed to create connection\n");
                put_device(&svc->dev);
                return NULL;
        }

        svc->connection->private = svc;

        return svc;
}

int gb_svc_add(struct gb_svc *svc)
{
        int ret;

        /*
         * The SVC protocol is currently driven by the SVC, so the SVC device
         * is added from the connection request handler when enough
         * information has been received.
         */
        ret = gb_connection_init(svc->connection);
        if (ret)
                return ret;

        return 0;
}

void gb_svc_del(struct gb_svc *svc)
{
        /*
         * The SVC device may have been registered from the request handler.
         */
        if (device_is_registered(&svc->dev))
                device_del(&svc->dev);

        gb_connection_exit(svc->connection);

        flush_workqueue(svc->wq);
}

void gb_svc_put(struct gb_svc *svc)
{
        put_device(&svc->dev);
}

static int gb_svc_connection_init(struct gb_connection *connection)
{
        struct gb_svc *svc = connection->private;

        dev_dbg(&svc->dev, "%s\n", __func__);

        return 0;
}

static void gb_svc_connection_exit(struct gb_connection *connection)
{
        struct gb_svc *svc = connection->private;

        dev_dbg(&svc->dev, "%s\n", __func__);
}

static struct gb_protocol svc_protocol = {
        .name                   = "svc",
        .id                     = GREYBUS_PROTOCOL_SVC,
        .major                  = GB_SVC_VERSION_MAJOR,
        .minor                  = GB_SVC_VERSION_MINOR,
        .connection_init        = gb_svc_connection_init,
        .connection_exit        = gb_svc_connection_exit,
        .request_recv           = gb_svc_request_recv,
        .flags                  = GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
                                  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
                                  GB_PROTOCOL_SKIP_VERSION,
};
gb_builtin_protocol_driver(svc_protocol);