greybus: svc: refactor interface-route creation
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/input.h>
11 #include <linux/workqueue.h>
12
13 #include "greybus.h"
14
15 #define SVC_KEY_ARA_BUTTON      KEY_A
16
/*
 * Work item used to defer processing of incoming SVC requests (hotplug /
 * hot-unplug) to svc->wq, so slow module bring-up does not block other
 * SVC events.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* queued on svc->wq */
	struct gb_operation *operation;	/* reference held until processed */
};
21
22
23 static ssize_t endo_id_show(struct device *dev,
24                         struct device_attribute *attr, char *buf)
25 {
26         struct gb_svc *svc = to_gb_svc(dev);
27
28         return sprintf(buf, "0x%04x\n", svc->endo_id);
29 }
30 static DEVICE_ATTR_RO(endo_id);
31
32 static ssize_t ap_intf_id_show(struct device *dev,
33                         struct device_attribute *attr, char *buf)
34 {
35         struct gb_svc *svc = to_gb_svc(dev);
36
37         return sprintf(buf, "%u\n", svc->ap_intf_id);
38 }
39 static DEVICE_ATTR_RO(ap_intf_id);
40
41
42 // FIXME
43 // This is a hack, we need to do this "right" and clean the interface up
44 // properly, not just forcibly yank the thing out of the system and hope for the
45 // best.  But for now, people want their modules to come out without having to
46 // throw the thing to the ground or get out a screwdriver.
47 static ssize_t intf_eject_store(struct device *dev,
48                                 struct device_attribute *attr, const char *buf,
49                                 size_t len)
50 {
51         struct gb_svc *svc = to_gb_svc(dev);
52         unsigned short intf_id;
53         int ret;
54
55         ret = kstrtou16(buf, 10, &intf_id);
56         if (ret < 0)
57                 return ret;
58
59         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
60
61         ret = gb_svc_intf_eject(svc, intf_id);
62         if (ret < 0)
63                 return ret;
64
65         return len;
66 }
67 static DEVICE_ATTR_WO(intf_eject);
68
69 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
70                              char *buf)
71 {
72         struct gb_svc *svc = to_gb_svc(dev);
73
74         return sprintf(buf, "%s\n",
75                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
76 }
77
78 static ssize_t watchdog_store(struct device *dev,
79                               struct device_attribute *attr, const char *buf,
80                               size_t len)
81 {
82         struct gb_svc *svc = to_gb_svc(dev);
83         int retval;
84         bool user_request;
85
86         retval = strtobool(buf, &user_request);
87         if (retval)
88                 return retval;
89
90         if (user_request)
91                 retval = gb_svc_watchdog_enable(svc);
92         else
93                 retval = gb_svc_watchdog_disable(svc);
94         if (retval)
95                 return retval;
96         return len;
97 }
98 static DEVICE_ATTR_RW(watchdog);
99
/* sysfs attributes exposed under the svc device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
108
109 static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
110 {
111         struct gb_svc_intf_device_id_request request;
112
113         request.intf_id = intf_id;
114         request.device_id = device_id;
115
116         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
117                                  &request, sizeof(request), NULL, 0);
118 }
119
120 int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
121 {
122         struct gb_svc_intf_reset_request request;
123
124         request.intf_id = intf_id;
125
126         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
127                                  &request, sizeof(request), NULL, 0);
128 }
129 EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
130
/*
 * Ask the SVC to physically eject the module holding the given interface.
 * Returns 0 on success or a negative errno from the operation core.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	return gb_operation_sync_timeout(svc->connection,
					 GB_SVC_TYPE_INTF_EJECT, &request,
					 sizeof(request), NULL, 0,
					 GB_SVC_EJECT_TIME);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_eject);
147
148 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
149                         u32 *value)
150 {
151         struct gb_svc_dme_peer_get_request request;
152         struct gb_svc_dme_peer_get_response response;
153         u16 result;
154         int ret;
155
156         request.intf_id = intf_id;
157         request.attr = cpu_to_le16(attr);
158         request.selector = cpu_to_le16(selector);
159
160         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
161                                 &request, sizeof(request),
162                                 &response, sizeof(response));
163         if (ret) {
164                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
165                                 intf_id, attr, selector, ret);
166                 return ret;
167         }
168
169         result = le16_to_cpu(response.result_code);
170         if (result) {
171                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
172                                 intf_id, attr, selector, result);
173                 return -EIO;
174         }
175
176         if (value)
177                 *value = le32_to_cpu(response.attr_value);
178
179         return 0;
180 }
181 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
182
183 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
184                         u32 value)
185 {
186         struct gb_svc_dme_peer_set_request request;
187         struct gb_svc_dme_peer_set_response response;
188         u16 result;
189         int ret;
190
191         request.intf_id = intf_id;
192         request.attr = cpu_to_le16(attr);
193         request.selector = cpu_to_le16(selector);
194         request.value = cpu_to_le32(value);
195
196         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
197                                 &request, sizeof(request),
198                                 &response, sizeof(response));
199         if (ret) {
200                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
201                                 intf_id, attr, selector, value, ret);
202                 return ret;
203         }
204
205         result = le16_to_cpu(response.result_code);
206         if (result) {
207                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
208                                 intf_id, attr, selector, value, result);
209                 return -EIO;
210         }
211
212         return 0;
213 }
214 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
215
216 /*
217  * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
218  * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
219  * reading a non-zero value from it.
220  *
221  * FIXME: This is module-hardware dependent and needs to be extended for every
222  * type of module we want to support.
223  */
224 static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
225 {
226         struct gb_host_device *hd = intf->hd;
227         int ret;
228         u32 value;
229         u16 attr;
230         u8 init_status;
231
232         /*
233          * Check if the module is ES2 or ES3, and choose attr number
234          * appropriately.
235          * FIXME: Remove ES2 support from the kernel entirely.
236          */
237         if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
238                                 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
239                 attr = DME_ATTR_T_TST_SRC_INCREMENT;
240         else
241                 attr = DME_ATTR_ES3_INIT_STATUS;
242
243         /* Read and clear boot status in ES3_INIT_STATUS */
244         ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
245                                   DME_ATTR_SELECTOR_INDEX, &value);
246
247         if (ret)
248                 return ret;
249
250         /*
251          * A nonzero boot status indicates the module has finished
252          * booting. Clear it.
253          */
254         if (!value) {
255                 dev_err(&intf->dev, "Module not ready yet\n");
256                 return -ENODEV;
257         }
258
259         /*
260          * Check if the module needs to boot from UniPro.
261          * For ES2: We need to check lowest 8 bits of 'value'.
262          * For ES3: We need to check highest 8 bits out of 32 of 'value'.
263          * FIXME: Remove ES2 support from the kernel entirely.
264          */
265         if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
266                                 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
267                 init_status = value;
268         else
269                 init_status = value >> 24;
270
271         if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
272                                 init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
273                 intf->boot_over_unipro = true;
274
275         return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
276                                    DME_ATTR_SELECTOR_INDEX, 0);
277 }
278
279 int gb_svc_connection_create(struct gb_svc *svc,
280                                 u8 intf1_id, u16 cport1_id,
281                                 u8 intf2_id, u16 cport2_id,
282                                 u8 cport_flags)
283 {
284         struct gb_svc_conn_create_request request;
285
286         request.intf1_id = intf1_id;
287         request.cport1_id = cpu_to_le16(cport1_id);
288         request.intf2_id = intf2_id;
289         request.cport2_id = cpu_to_le16(cport2_id);
290         request.tc = 0;         /* TC0 */
291         request.flags = cport_flags;
292
293         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
294                                  &request, sizeof(request), NULL, 0);
295 }
296 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
297
298 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
299                                u8 intf2_id, u16 cport2_id)
300 {
301         struct gb_svc_conn_destroy_request request;
302         struct gb_connection *connection = svc->connection;
303         int ret;
304
305         request.intf1_id = intf1_id;
306         request.cport1_id = cpu_to_le16(cport1_id);
307         request.intf2_id = intf2_id;
308         request.cport2_id = cpu_to_le16(cport2_id);
309
310         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
311                                 &request, sizeof(request), NULL, 0);
312         if (ret) {
313                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
314                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
315         }
316 }
317 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
318
319 /* Creates bi-directional routes between the devices */
320 static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
321                                u8 intf2_id, u8 dev2_id)
322 {
323         struct gb_svc_route_create_request request;
324
325         request.intf1_id = intf1_id;
326         request.dev1_id = dev1_id;
327         request.intf2_id = intf2_id;
328         request.dev2_id = dev2_id;
329
330         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
331                                  &request, sizeof(request), NULL, 0);
332 }
333
334 /* Destroys bi-directional routes between the devices */
335 static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
336 {
337         struct gb_svc_route_destroy_request request;
338         int ret;
339
340         request.intf1_id = intf1_id;
341         request.intf2_id = intf2_id;
342
343         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
344                                 &request, sizeof(request), NULL, 0);
345         if (ret) {
346                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
347                                 intf1_id, intf2_id, ret);
348         }
349 }
350
351 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
352                                u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
353                                u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
354                                u8 flags, u32 quirks)
355 {
356         struct gb_svc_intf_set_pwrm_request request;
357         struct gb_svc_intf_set_pwrm_response response;
358         int ret;
359
360         request.intf_id = intf_id;
361         request.hs_series = hs_series;
362         request.tx_mode = tx_mode;
363         request.tx_gear = tx_gear;
364         request.tx_nlanes = tx_nlanes;
365         request.rx_mode = rx_mode;
366         request.rx_gear = rx_gear;
367         request.rx_nlanes = rx_nlanes;
368         request.flags = flags;
369         request.quirks = cpu_to_le32(quirks);
370
371         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
372                                 &request, sizeof(request),
373                                 &response, sizeof(response));
374         if (ret < 0)
375                 return ret;
376
377         return le16_to_cpu(response.result_code);
378 }
379 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
380
/*
 * Ping the SVC to verify the link is alive.  Uses twice the default
 * operation timeout to tolerate a busy SVC.
 */
int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
EXPORT_SYMBOL_GPL(gb_svc_ping);
388
389 static int gb_svc_version_request(struct gb_operation *op)
390 {
391         struct gb_connection *connection = op->connection;
392         struct gb_svc *svc = connection->private;
393         struct gb_protocol_version_request *request;
394         struct gb_protocol_version_response *response;
395
396         if (op->request->payload_size < sizeof(*request)) {
397                 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
398                                 op->request->payload_size,
399                                 sizeof(*request));
400                 return -EINVAL;
401         }
402
403         request = op->request->payload;
404
405         if (request->major > GB_SVC_VERSION_MAJOR) {
406                 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
407                                 request->major, GB_SVC_VERSION_MAJOR);
408                 return -ENOTSUPP;
409         }
410
411         svc->protocol_major = request->major;
412         svc->protocol_minor = request->minor;
413
414         if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
415                 return -ENOMEM;
416
417         response = op->response->payload;
418         response->major = svc->protocol_major;
419         response->minor = svc->protocol_minor;
420
421         return 0;
422 }
423
/*
 * Handle the SVC hello request: record the endo id and the AP's interface
 * id, then register the svc device, its input device and the watchdog.
 * Registrations are unwound in reverse order on failure.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		/* Unwind in reverse registration order. */
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	return 0;
}
465
/*
 * Allocate a device id for a newly-plugged interface, tell the SVC about
 * the assignment and create a two-way route between the AP and the
 * interface.  On success intf->device_id is set; on failure the allocated
 * id is released.
 */
static int gb_svc_interface_route_create(struct gb_svc *svc,
						struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/*
	 * Create a device id for the interface:
	 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
	 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
	 *
	 * XXX Do we need to allocate device ID for SVC or the AP here? And what
	 * XXX about an AP with multiple interface blocks?
	 */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
				intf_id, ret);
		return ret;
	}
	device_id = ret;

	/* Inform the SVC of the id so it can program the switch. */
	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
				device_id, intf_id, ret);
		goto err_ida_remove;
	}

	/* Create a two-way route between the AP and the new interface. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
				intf_id, device_id, ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}
520
/*
 * Tear down an interface: mark it disconnected (so outstanding operations
 * fail fast), remove it from the bus, destroy its AP route and release its
 * device id.  The ordering mirrors the reverse of hotplug setup.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id = intf->device_id;

	intf->disconnected = true;

	gb_interface_remove(intf);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);
}
537
/*
 * Deferred hotplug handler (runs on svc->wq): create and initialize the
 * interface named in the request, replacing any stale instance of the same
 * id.  Errors are logged and partially-created state is unwound; no status
 * is reported back to the SVC.
 */
static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
{
	struct gb_svc_intf_hotplug_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = connection->private;
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 intf_id;
	u32 vendor_id = 0;
	u32 product_id = 0;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (intf) {
		/*
		 * For ES2, we need to maintain the same vendor/product ids we
		 * got from bootrom, otherwise userspace can't distinguish
		 * between modules.
		 */
		vendor_id = intf->vendor_id;
		product_id = intf->product_id;

		/*
		 * We have received a hotplug request for an interface that
		 * already exists.
		 *
		 * This can happen in cases like:
		 * - bootrom loading the firmware image and booting into that,
		 *   which only generates a hotplug event. i.e. no hot-unplug
		 *   event.
		 * - Or the firmware on the module crashed and sent hotplug
		 *   request again to the SVC, which got propagated to AP.
		 *
		 * Remove the interface and add it again, and let user know
		 * about this with a print message.
		 */
		dev_info(&svc->dev, "removing interface %u to add it again\n",
				intf_id);
		gb_svc_intf_remove(svc, intf);
	}

	intf = gb_interface_create(hd, intf_id);
	if (!intf) {
		dev_err(&svc->dev, "failed to create interface %u\n",
				intf_id);
		return;
	}

	/* Copy the identifying data carried in the hotplug request. */
	intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
	intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
	intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
	intf->product_id = le32_to_cpu(request->data.ara_prod_id);
	intf->serial_number = le64_to_cpu(request->data.serial_number);

	/*
	 * Use VID/PID specified at hotplug if:
	 * - Bridge ASIC chip isn't ES2
	 * - Received non-zero Vendor/Product ids
	 *
	 * Otherwise, use the ids we received from bootrom.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID &&
	    intf->vendor_id == 0 && intf->product_id == 0) {
		intf->vendor_id = vendor_id;
		intf->product_id = product_id;
	}

	ret = gb_svc_read_and_clear_module_boot_status(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
				intf_id, ret);
		goto destroy_interface;
	}

	ret = gb_svc_interface_route_create(svc, intf);
	if (ret)
		goto destroy_interface;

	ret = gb_interface_init(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to initialize interface %u: %d\n",
				intf_id, ret);
		goto destroy_route;
	}

	return;

destroy_route:
	/* Unwind what gb_svc_interface_route_create() set up. */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
destroy_interface:
	gb_interface_remove(intf);
}
638
639 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
640 {
641         struct gb_svc *svc = operation->connection->private;
642         struct gb_svc_intf_hot_unplug_request *request;
643         struct gb_host_device *hd = operation->connection->hd;
644         struct gb_interface *intf;
645         u8 intf_id;
646
647         /* The request message size has already been verified. */
648         request = operation->request->payload;
649         intf_id = request->intf_id;
650
651         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
652
653         intf = gb_interface_find(hd, intf_id);
654         if (!intf) {
655                 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
656                                 intf_id);
657                 return;
658         }
659
660         gb_svc_intf_remove(svc, intf);
661 }
662
663 static void gb_svc_process_deferred_request(struct work_struct *work)
664 {
665         struct gb_svc_deferred_request *dr;
666         struct gb_operation *operation;
667         struct gb_svc *svc;
668         u8 type;
669
670         dr = container_of(work, struct gb_svc_deferred_request, work);
671         operation = dr->operation;
672         svc = operation->connection->private;
673         type = operation->request->header->type;
674
675         switch (type) {
676         case GB_SVC_TYPE_INTF_HOTPLUG:
677                 gb_svc_process_intf_hotplug(operation);
678                 break;
679         case GB_SVC_TYPE_INTF_HOT_UNPLUG:
680                 gb_svc_process_intf_hot_unplug(operation);
681                 break;
682         default:
683                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
684         }
685
686         gb_operation_put(operation);
687         kfree(dr);
688 }
689
690 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
691 {
692         struct gb_svc *svc = operation->connection->private;
693         struct gb_svc_deferred_request *dr;
694
695         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
696         if (!dr)
697                 return -ENOMEM;
698
699         gb_operation_get(operation);
700
701         dr->operation = operation;
702         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
703
704         queue_work(svc->wq, &dr->work);
705
706         return 0;
707 }
708
709 /*
710  * Bringing up a module can be time consuming, as that may require lots of
711  * initialization on the module side. Over that, we may also need to download
712  * the firmware first and flash that on the module.
713  *
714  * In order not to make other svc events wait for all this to finish,
715  * handle most of module hotplug stuff outside of the hotplug callback, with
716  * help of a workqueue.
717  */
718 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
719 {
720         struct gb_svc *svc = op->connection->private;
721         struct gb_svc_intf_hotplug_request *request;
722
723         if (op->request->payload_size < sizeof(*request)) {
724                 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
725                                 op->request->payload_size, sizeof(*request));
726                 return -EINVAL;
727         }
728
729         request = op->request->payload;
730
731         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
732
733         return gb_svc_queue_deferred_request(op);
734 }
735
736 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
737 {
738         struct gb_svc *svc = op->connection->private;
739         struct gb_svc_intf_hot_unplug_request *request;
740
741         if (op->request->payload_size < sizeof(*request)) {
742                 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
743                                 op->request->payload_size, sizeof(*request));
744                 return -EINVAL;
745         }
746
747         request = op->request->payload;
748
749         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
750
751         return gb_svc_queue_deferred_request(op);
752 }
753
/*
 * Handle an interface-reset request from the SVC.  The request is
 * validated and parsed, but the actual reset is not implemented yet
 * (hence intf_id is currently assigned but unused).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
774
775 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
776 {
777         switch (key_code) {
778         case GB_KEYCODE_ARA:
779                 *code = SVC_KEY_ARA_BUTTON;
780                 break;
781         default:
782                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
783                 return -EINVAL;
784         }
785
786         return 0;
787 }
788
789 static int gb_svc_key_event_recv(struct gb_operation *op)
790 {
791         struct gb_svc *svc = op->connection->private;
792         struct gb_message *request = op->request;
793         struct gb_svc_key_event_request *key;
794         u16 code;
795         u8 event;
796         int ret;
797
798         if (request->payload_size < sizeof(*key)) {
799                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
800                          request->payload_size, sizeof(*key));
801                 return -EINVAL;
802         }
803
804         key = request->payload;
805
806         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
807         if (ret < 0)
808                 return ret;
809
810         event = key->key_event;
811         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
812                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
813                 return -EINVAL;
814         }
815
816         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
817         input_sync(svc->input);
818
819         return 0;
820 }
821
/*
 * Central SVC request dispatcher.  Enforces the protocol ordering state
 * machine (version -> hello -> anything else) before dispatching to the
 * per-type handler.  Returns 0 on success or a negative errno.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; state only advances after a successful handler. */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
884
885 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
886 {
887         struct input_dev *input_dev;
888
889         input_dev = input_allocate_device();
890         if (!input_dev)
891                 return ERR_PTR(-ENOMEM);
892
893         input_dev->name = dev_name(&svc->dev);
894         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
895                                     input_dev->name);
896         if (!svc->input_phys)
897                 goto err_free_input;
898
899         input_dev->phys = svc->input_phys;
900         input_dev->dev.parent = &svc->dev;
901
902         input_set_drvdata(input_dev, svc);
903
904         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
905
906         return input_dev;
907
908 err_free_input:
909         input_free_device(svc->input);
910         return ERR_PTR(-ENOMEM);
911 }
912
/*
 * Last-reference cleanup for the SVC device, invoked by the driver core
 * once the final put_device() drops the refcount to zero.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may never have been created (early create failure). */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
924
/* Device type for the SVC; release() runs when the last reference is dropped. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
929
930 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
931 {
932         struct gb_svc *svc;
933
934         svc = kzalloc(sizeof(*svc), GFP_KERNEL);
935         if (!svc)
936                 return NULL;
937
938         svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
939         if (!svc->wq) {
940                 kfree(svc);
941                 return NULL;
942         }
943
944         svc->dev.parent = &hd->dev;
945         svc->dev.bus = &greybus_bus_type;
946         svc->dev.type = &greybus_svc_type;
947         svc->dev.groups = svc_groups;
948         svc->dev.dma_mask = svc->dev.parent->dma_mask;
949         device_initialize(&svc->dev);
950
951         dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
952
953         ida_init(&svc->device_id_map);
954         svc->state = GB_SVC_STATE_RESET;
955         svc->hd = hd;
956
957         svc->input = gb_svc_input_create(svc);
958         if (IS_ERR(svc->input)) {
959                 dev_err(&svc->dev, "failed to create input device: %ld\n",
960                         PTR_ERR(svc->input));
961                 goto err_put_device;
962         }
963
964         svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
965                                                 gb_svc_request_handler);
966         if (IS_ERR(svc->connection)) {
967                 dev_err(&svc->dev, "failed to create connection: %ld\n",
968                                 PTR_ERR(svc->connection));
969                 goto err_free_input;
970         }
971
972         svc->connection->private = svc;
973
974         return svc;
975
976 err_free_input:
977         input_free_device(svc->input);
978 err_put_device:
979         put_device(&svc->dev);
980         return NULL;
981 }
982
983 int gb_svc_add(struct gb_svc *svc)
984 {
985         int ret;
986
987         /*
988          * The SVC protocol is currently driven by the SVC, so the SVC device
989          * is added from the connection request handler when enough
990          * information has been received.
991          */
992         ret = gb_connection_enable(svc->connection);
993         if (ret)
994                 return ret;
995
996         return 0;
997 }
998
/*
 * Tear down the SVC: stop the connection first so no new requests come
 * in, then undo any registration performed by the request handler, and
 * finally drain deferred work.
 */
void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Wait for any deferred-request work still queued on svc->wq. */
	flush_workqueue(svc->wq);
}
1015
/*
 * Drop a reference to the SVC device; gb_svc_release() runs when the
 * last reference goes away.
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}