6a8da0dd9f339e8233f85f082f1343f273f14959
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/input.h>
11 #include <linux/workqueue.h>
12
13 #include "greybus.h"
14
15 #define SVC_KEY_ARA_BUTTON      KEY_A
16
17 #define SVC_INTF_EJECT_TIMEOUT  9000
18
19 struct gb_svc_deferred_request {
20         struct work_struct work;
21         struct gb_operation *operation;
22 };
23
24
25 static ssize_t endo_id_show(struct device *dev,
26                         struct device_attribute *attr, char *buf)
27 {
28         struct gb_svc *svc = to_gb_svc(dev);
29
30         return sprintf(buf, "0x%04x\n", svc->endo_id);
31 }
32 static DEVICE_ATTR_RO(endo_id);
33
34 static ssize_t ap_intf_id_show(struct device *dev,
35                         struct device_attribute *attr, char *buf)
36 {
37         struct gb_svc *svc = to_gb_svc(dev);
38
39         return sprintf(buf, "%u\n", svc->ap_intf_id);
40 }
41 static DEVICE_ATTR_RO(ap_intf_id);
42
43
44 // FIXME
45 // This is a hack, we need to do this "right" and clean the interface up
46 // properly, not just forcibly yank the thing out of the system and hope for the
47 // best.  But for now, people want their modules to come out without having to
48 // throw the thing to the ground or get out a screwdriver.
49 static ssize_t intf_eject_store(struct device *dev,
50                                 struct device_attribute *attr, const char *buf,
51                                 size_t len)
52 {
53         struct gb_svc *svc = to_gb_svc(dev);
54         unsigned short intf_id;
55         int ret;
56
57         ret = kstrtou16(buf, 10, &intf_id);
58         if (ret < 0)
59                 return ret;
60
61         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
62
63         ret = gb_svc_intf_eject(svc, intf_id);
64         if (ret < 0)
65                 return ret;
66
67         return len;
68 }
69 static DEVICE_ATTR_WO(intf_eject);
70
71 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
72                              char *buf)
73 {
74         struct gb_svc *svc = to_gb_svc(dev);
75
76         return sprintf(buf, "%s\n",
77                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
78 }
79
80 static ssize_t watchdog_store(struct device *dev,
81                               struct device_attribute *attr, const char *buf,
82                               size_t len)
83 {
84         struct gb_svc *svc = to_gb_svc(dev);
85         int retval;
86         bool user_request;
87
88         retval = strtobool(buf, &user_request);
89         if (retval)
90                 return retval;
91
92         if (user_request)
93                 retval = gb_svc_watchdog_enable(svc);
94         else
95                 retval = gb_svc_watchdog_disable(svc);
96         if (retval)
97                 return retval;
98         return len;
99 }
100 static DEVICE_ATTR_RW(watchdog);
101
/* Attributes exposed in the SVC device's sysfs directory. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
110
111 static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
112 {
113         struct gb_svc_intf_device_id_request request;
114
115         request.intf_id = intf_id;
116         request.device_id = device_id;
117
118         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
119                                  &request, sizeof(request), NULL, 0);
120 }
121
122 int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
123 {
124         struct gb_svc_intf_reset_request request;
125
126         request.intf_id = intf_id;
127
128         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
129                                  &request, sizeof(request), NULL, 0);
130 }
131 EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
132
133 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
134 {
135         struct gb_svc_intf_eject_request request;
136         int ret;
137
138         request.intf_id = intf_id;
139
140         /*
141          * The pulse width for module release in svc is long so we need to
142          * increase the timeout so the operation will not return to soon.
143          */
144         ret = gb_operation_sync_timeout(svc->connection,
145                                         GB_SVC_TYPE_INTF_EJECT, &request,
146                                         sizeof(request), NULL, 0,
147                                         SVC_INTF_EJECT_TIMEOUT);
148         if (ret) {
149                 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
150                 return ret;
151         }
152
153         return 0;
154 }
155
156 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
157                         u32 *value)
158 {
159         struct gb_svc_dme_peer_get_request request;
160         struct gb_svc_dme_peer_get_response response;
161         u16 result;
162         int ret;
163
164         request.intf_id = intf_id;
165         request.attr = cpu_to_le16(attr);
166         request.selector = cpu_to_le16(selector);
167
168         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
169                                 &request, sizeof(request),
170                                 &response, sizeof(response));
171         if (ret) {
172                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
173                                 intf_id, attr, selector, ret);
174                 return ret;
175         }
176
177         result = le16_to_cpu(response.result_code);
178         if (result) {
179                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
180                                 intf_id, attr, selector, result);
181                 return -EIO;
182         }
183
184         if (value)
185                 *value = le32_to_cpu(response.attr_value);
186
187         return 0;
188 }
189 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
190
191 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
192                         u32 value)
193 {
194         struct gb_svc_dme_peer_set_request request;
195         struct gb_svc_dme_peer_set_response response;
196         u16 result;
197         int ret;
198
199         request.intf_id = intf_id;
200         request.attr = cpu_to_le16(attr);
201         request.selector = cpu_to_le16(selector);
202         request.value = cpu_to_le32(value);
203
204         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
205                                 &request, sizeof(request),
206                                 &response, sizeof(response));
207         if (ret) {
208                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
209                                 intf_id, attr, selector, value, ret);
210                 return ret;
211         }
212
213         result = le16_to_cpu(response.result_code);
214         if (result) {
215                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
216                                 intf_id, attr, selector, value, result);
217                 return -EIO;
218         }
219
220         return 0;
221 }
222 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
223
224 /*
225  * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
226  * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
227  * reading a non-zero value from it.
228  *
229  * FIXME: This is module-hardware dependent and needs to be extended for every
230  * type of module we want to support.
231  */
232 static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
233 {
234         struct gb_host_device *hd = intf->hd;
235         int ret;
236         u32 value;
237         u16 attr;
238         u8 init_status;
239
240         /*
241          * Check if the module is ES2 or ES3, and choose attr number
242          * appropriately.
243          * FIXME: Remove ES2 support from the kernel entirely.
244          */
245         if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
246                                 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
247                 attr = DME_ATTR_T_TST_SRC_INCREMENT;
248         else
249                 attr = DME_ATTR_ES3_INIT_STATUS;
250
251         /* Read and clear boot status in ES3_INIT_STATUS */
252         ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
253                                   DME_ATTR_SELECTOR_INDEX, &value);
254
255         if (ret)
256                 return ret;
257
258         /*
259          * A nonzero boot status indicates the module has finished
260          * booting. Clear it.
261          */
262         if (!value) {
263                 dev_err(&intf->dev, "Module not ready yet\n");
264                 return -ENODEV;
265         }
266
267         /*
268          * Check if the module needs to boot from UniPro.
269          * For ES2: We need to check lowest 8 bits of 'value'.
270          * For ES3: We need to check highest 8 bits out of 32 of 'value'.
271          * FIXME: Remove ES2 support from the kernel entirely.
272          */
273         if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
274                                 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
275                 init_status = value;
276         else
277                 init_status = value >> 24;
278
279         if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
280                                 init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
281                 intf->boot_over_unipro = true;
282
283         return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
284                                    DME_ATTR_SELECTOR_INDEX, 0);
285 }
286
287 int gb_svc_connection_create(struct gb_svc *svc,
288                                 u8 intf1_id, u16 cport1_id,
289                                 u8 intf2_id, u16 cport2_id,
290                                 u8 cport_flags)
291 {
292         struct gb_svc_conn_create_request request;
293
294         request.intf1_id = intf1_id;
295         request.cport1_id = cpu_to_le16(cport1_id);
296         request.intf2_id = intf2_id;
297         request.cport2_id = cpu_to_le16(cport2_id);
298         request.tc = 0;         /* TC0 */
299         request.flags = cport_flags;
300
301         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
302                                  &request, sizeof(request), NULL, 0);
303 }
304 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
305
306 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
307                                u8 intf2_id, u16 cport2_id)
308 {
309         struct gb_svc_conn_destroy_request request;
310         struct gb_connection *connection = svc->connection;
311         int ret;
312
313         request.intf1_id = intf1_id;
314         request.cport1_id = cpu_to_le16(cport1_id);
315         request.intf2_id = intf2_id;
316         request.cport2_id = cpu_to_le16(cport2_id);
317
318         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
319                                 &request, sizeof(request), NULL, 0);
320         if (ret) {
321                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
322                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
323         }
324 }
325 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
326
327 /* Creates bi-directional routes between the devices */
328 static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
329                                u8 intf2_id, u8 dev2_id)
330 {
331         struct gb_svc_route_create_request request;
332
333         request.intf1_id = intf1_id;
334         request.dev1_id = dev1_id;
335         request.intf2_id = intf2_id;
336         request.dev2_id = dev2_id;
337
338         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
339                                  &request, sizeof(request), NULL, 0);
340 }
341
342 /* Destroys bi-directional routes between the devices */
343 static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
344 {
345         struct gb_svc_route_destroy_request request;
346         int ret;
347
348         request.intf1_id = intf1_id;
349         request.intf2_id = intf2_id;
350
351         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
352                                 &request, sizeof(request), NULL, 0);
353         if (ret) {
354                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
355                                 intf1_id, intf2_id, ret);
356         }
357 }
358
359 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
360                                u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
361                                u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
362                                u8 flags, u32 quirks)
363 {
364         struct gb_svc_intf_set_pwrm_request request;
365         struct gb_svc_intf_set_pwrm_response response;
366         int ret;
367
368         request.intf_id = intf_id;
369         request.hs_series = hs_series;
370         request.tx_mode = tx_mode;
371         request.tx_gear = tx_gear;
372         request.tx_nlanes = tx_nlanes;
373         request.rx_mode = rx_mode;
374         request.rx_gear = rx_gear;
375         request.rx_nlanes = rx_nlanes;
376         request.flags = flags;
377         request.quirks = cpu_to_le32(quirks);
378
379         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
380                                 &request, sizeof(request),
381                                 &response, sizeof(response));
382         if (ret < 0)
383                 return ret;
384
385         return le16_to_cpu(response.result_code);
386 }
387 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
388
/*
 * Ping the SVC over its control connection to check that it is responsive.
 * Uses twice the default operation timeout. Returns 0 on success or a
 * negative errno.
 */
int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
EXPORT_SYMBOL_GPL(gb_svc_ping);
396
397 static int gb_svc_version_request(struct gb_operation *op)
398 {
399         struct gb_connection *connection = op->connection;
400         struct gb_svc *svc = gb_connection_get_data(connection);
401         struct gb_protocol_version_request *request;
402         struct gb_protocol_version_response *response;
403
404         if (op->request->payload_size < sizeof(*request)) {
405                 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
406                                 op->request->payload_size,
407                                 sizeof(*request));
408                 return -EINVAL;
409         }
410
411         request = op->request->payload;
412
413         if (request->major > GB_SVC_VERSION_MAJOR) {
414                 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
415                                 request->major, GB_SVC_VERSION_MAJOR);
416                 return -ENOTSUPP;
417         }
418
419         svc->protocol_major = request->major;
420         svc->protocol_minor = request->minor;
421
422         if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
423                 return -ENOMEM;
424
425         response = op->response->payload;
426         response->major = svc->protocol_major;
427         response->minor = svc->protocol_minor;
428
429         return 0;
430 }
431
432 static int gb_svc_hello(struct gb_operation *op)
433 {
434         struct gb_connection *connection = op->connection;
435         struct gb_svc *svc = gb_connection_get_data(connection);
436         struct gb_svc_hello_request *hello_request;
437         int ret;
438
439         if (op->request->payload_size < sizeof(*hello_request)) {
440                 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
441                                 op->request->payload_size,
442                                 sizeof(*hello_request));
443                 return -EINVAL;
444         }
445
446         hello_request = op->request->payload;
447         svc->endo_id = le16_to_cpu(hello_request->endo_id);
448         svc->ap_intf_id = hello_request->interface_id;
449
450         ret = device_add(&svc->dev);
451         if (ret) {
452                 dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
453                 return ret;
454         }
455
456         ret = input_register_device(svc->input);
457         if (ret) {
458                 dev_err(&svc->dev, "failed to register input: %d\n", ret);
459                 device_del(&svc->dev);
460                 return ret;
461         }
462
463         ret = gb_svc_watchdog_create(svc);
464         if (ret) {
465                 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
466                 input_unregister_device(svc->input);
467                 device_del(&svc->dev);
468                 return ret;
469         }
470
471         return 0;
472 }
473
/*
 * Allocate a device id for @intf, program it into the SVC, and create a
 * two-way route between the AP and the interface. On success the allocated
 * id is stored in intf->device_id; on failure the id is released again.
 */
static int gb_svc_interface_route_create(struct gb_svc *svc,
						struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/*
	 * Create a device id for the interface:
	 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
	 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
	 *
	 * XXX Do we need to allocate device ID for SVC or the AP here? And what
	 * XXX about an AP with multiple interface blocks?
	 */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
				intf_id, ret);
		return ret;
	}
	/* ida_simple_get() returns the allocated id on success. */
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
				device_id, intf_id, ret);
		goto err_ida_remove;
	}

	/* Create a two-way route between the AP and the new interface. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
				intf_id, device_id, ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}
528
529 static void gb_svc_interface_route_destroy(struct gb_svc *svc,
530                                                 struct gb_interface *intf)
531 {
532         if (intf->device_id == GB_DEVICE_ID_BAD)
533                 return;
534
535         gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
536         ida_simple_remove(&svc->device_id_map, intf->device_id);
537         intf->device_id = GB_DEVICE_ID_BAD;
538 }
539
/*
 * Remove an interface: mark it disconnected, disable it, tear down its
 * route/device id, and finally remove the interface itself.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	intf->disconnected = true;

	gb_interface_disable(intf);
	gb_svc_interface_route_destroy(svc, intf);
	gb_interface_remove(intf);
}
548
/*
 * Process a deferred interface-hotplug request: (re)create the interface,
 * populate its ids from the request, wait for its boot status, set up its
 * route and enable it.
 *
 * Note that the interface is added at the end even when one of the
 * intermediate steps fails, so the (non-functional) interface is still
 * visible to userspace.
 */
static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
{
	struct gb_svc_intf_hotplug_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 intf_id;
	u32 vendor_id = 0;
	u32 product_id = 0;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (intf) {
		/*
		 * For ES2, we need to maintain the same vendor/product ids we
		 * got from bootrom, otherwise userspace can't distinguish
		 * between modules.
		 */
		vendor_id = intf->vendor_id;
		product_id = intf->product_id;

		/*
		 * We have received a hotplug request for an interface that
		 * already exists.
		 *
		 * This can happen in cases like:
		 * - bootrom loading the firmware image and booting into that,
		 *   which only generates a hotplug event. i.e. no hot-unplug
		 *   event.
		 * - Or the firmware on the module crashed and sent hotplug
		 *   request again to the SVC, which got propagated to AP.
		 *
		 * Remove the interface and add it again, and let user know
		 * about this with a print message.
		 */
		dev_info(&svc->dev, "removing interface %u to add it again\n",
				intf_id);
		gb_svc_intf_remove(svc, intf);
	}

	intf = gb_interface_create(hd, intf_id);
	if (!intf) {
		dev_err(&svc->dev, "failed to create interface %u\n",
				intf_id);
		return;
	}

	/* Copy the identifiers reported in the hotplug request. */
	intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
	intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
	intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
	intf->product_id = le32_to_cpu(request->data.ara_prod_id);
	intf->serial_number = le64_to_cpu(request->data.serial_number);

	/*
	 * Use VID/PID specified at hotplug if:
	 * - Bridge ASIC chip isn't ES2
	 * - Received non-zero Vendor/Product ids
	 *
	 * Otherwise, use the ids we received from bootrom.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID &&
	    intf->vendor_id == 0 && intf->product_id == 0) {
		intf->vendor_id = vendor_id;
		intf->product_id = product_id;
	}

	ret = gb_svc_read_and_clear_module_boot_status(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
				intf_id, ret);
		goto out_interface_add;
	}

	ret = gb_svc_interface_route_create(svc, intf);
	if (ret)
		goto out_interface_add;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to enable interface %u: %d\n",
				intf_id, ret);
		goto out_interface_add;
	}

out_interface_add:
	gb_interface_add(intf);
}
644
645 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
646 {
647         struct gb_svc *svc = gb_connection_get_data(operation->connection);
648         struct gb_svc_intf_hot_unplug_request *request;
649         struct gb_host_device *hd = operation->connection->hd;
650         struct gb_interface *intf;
651         u8 intf_id;
652
653         /* The request message size has already been verified. */
654         request = operation->request->payload;
655         intf_id = request->intf_id;
656
657         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
658
659         intf = gb_interface_find(hd, intf_id);
660         if (!intf) {
661                 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
662                                 intf_id);
663                 return;
664         }
665
666         gb_svc_intf_remove(svc, intf);
667 }
668
669 static void gb_svc_process_deferred_request(struct work_struct *work)
670 {
671         struct gb_svc_deferred_request *dr;
672         struct gb_operation *operation;
673         struct gb_svc *svc;
674         u8 type;
675
676         dr = container_of(work, struct gb_svc_deferred_request, work);
677         operation = dr->operation;
678         svc = gb_connection_get_data(operation->connection);
679         type = operation->request->header->type;
680
681         switch (type) {
682         case GB_SVC_TYPE_INTF_HOTPLUG:
683                 gb_svc_process_intf_hotplug(operation);
684                 break;
685         case GB_SVC_TYPE_INTF_HOT_UNPLUG:
686                 gb_svc_process_intf_hot_unplug(operation);
687                 break;
688         default:
689                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
690         }
691
692         gb_operation_put(operation);
693         kfree(dr);
694 }
695
696 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
697 {
698         struct gb_svc *svc = gb_connection_get_data(operation->connection);
699         struct gb_svc_deferred_request *dr;
700
701         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
702         if (!dr)
703                 return -ENOMEM;
704
705         gb_operation_get(operation);
706
707         dr->operation = operation;
708         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
709
710         queue_work(svc->wq, &dr->work);
711
712         return 0;
713 }
714
715 /*
716  * Bringing up a module can be time consuming, as that may require lots of
717  * initialization on the module side. Over that, we may also need to download
718  * the firmware first and flash that on the module.
719  *
720  * In order not to make other svc events wait for all this to finish,
721  * handle most of module hotplug stuff outside of the hotplug callback, with
722  * help of a workqueue.
723  */
724 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
725 {
726         struct gb_svc *svc = gb_connection_get_data(op->connection);
727         struct gb_svc_intf_hotplug_request *request;
728
729         if (op->request->payload_size < sizeof(*request)) {
730                 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
731                                 op->request->payload_size, sizeof(*request));
732                 return -EINVAL;
733         }
734
735         request = op->request->payload;
736
737         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
738
739         return gb_svc_queue_deferred_request(op);
740 }
741
742 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
743 {
744         struct gb_svc *svc = gb_connection_get_data(op->connection);
745         struct gb_svc_intf_hot_unplug_request *request;
746
747         if (op->request->payload_size < sizeof(*request)) {
748                 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
749                                 op->request->payload_size, sizeof(*request));
750                 return -EINVAL;
751         }
752
753         request = op->request->payload;
754
755         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
756
757         return gb_svc_queue_deferred_request(op);
758 }
759
/*
 * Handle an incoming interface-reset request. Currently this only validates
 * the request and extracts the interface id; the actual reset is not yet
 * implemented (see FIXME below).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
780
781 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
782 {
783         switch (key_code) {
784         case GB_KEYCODE_ARA:
785                 *code = SVC_KEY_ARA_BUTTON;
786                 break;
787         default:
788                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
789                 return -EINVAL;
790         }
791
792         return 0;
793 }
794
795 static int gb_svc_key_event_recv(struct gb_operation *op)
796 {
797         struct gb_svc *svc = gb_connection_get_data(op->connection);
798         struct gb_message *request = op->request;
799         struct gb_svc_key_event_request *key;
800         u16 code;
801         u8 event;
802         int ret;
803
804         if (request->payload_size < sizeof(*key)) {
805                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
806                          request->payload_size, sizeof(*key));
807                 return -EINVAL;
808         }
809
810         key = request->payload;
811
812         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
813         if (ret < 0)
814                 return ret;
815
816         event = key->key_event;
817         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
818                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
819                 return -EINVAL;
820         }
821
822         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
823         input_sync(svc->input);
824
825         return 0;
826 }
827
/*
 * Top-level handler for incoming SVC requests: enforce the required startup
 * ordering, then dispatch by request type.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; the first two handlers also advance svc->state. */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
890
891 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
892 {
893         struct input_dev *input_dev;
894
895         input_dev = input_allocate_device();
896         if (!input_dev)
897                 return ERR_PTR(-ENOMEM);
898
899         input_dev->name = dev_name(&svc->dev);
900         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
901                                     input_dev->name);
902         if (!svc->input_phys)
903                 goto err_free_input;
904
905         input_dev->phys = svc->input_phys;
906         input_dev->dev.parent = &svc->dev;
907
908         input_set_drvdata(input_dev, svc);
909
910         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
911
912         return input_dev;
913
914 err_free_input:
915         input_free_device(svc->input);
916         return ERR_PTR(-ENOMEM);
917 }
918
/*
 * Device release callback: frees everything the SVC structure owns.
 * Runs when the last reference to the embedded device is dropped
 * (e.g. via gb_svc_put()).
 */
static void gb_svc_release(struct device *dev)
{
        struct gb_svc *svc = to_gb_svc(dev);

        /* connection may never have been created if gb_svc_create() failed */
        if (svc->connection)
                gb_connection_destroy(svc->connection);
        ida_destroy(&svc->device_id_map);
        destroy_workqueue(svc->wq);
        kfree(svc->input_phys);
        kfree(svc);
}
930
/* Device type for the SVC device; hooks up the release callback. */
struct device_type greybus_svc_type = {
        .name           = "greybus_svc",
        .release        = gb_svc_release,
};
935
936 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
937 {
938         struct gb_svc *svc;
939
940         svc = kzalloc(sizeof(*svc), GFP_KERNEL);
941         if (!svc)
942                 return NULL;
943
944         svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
945         if (!svc->wq) {
946                 kfree(svc);
947                 return NULL;
948         }
949
950         svc->dev.parent = &hd->dev;
951         svc->dev.bus = &greybus_bus_type;
952         svc->dev.type = &greybus_svc_type;
953         svc->dev.groups = svc_groups;
954         svc->dev.dma_mask = svc->dev.parent->dma_mask;
955         device_initialize(&svc->dev);
956
957         dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
958
959         ida_init(&svc->device_id_map);
960         svc->state = GB_SVC_STATE_RESET;
961         svc->hd = hd;
962
963         svc->input = gb_svc_input_create(svc);
964         if (IS_ERR(svc->input)) {
965                 dev_err(&svc->dev, "failed to create input device: %ld\n",
966                         PTR_ERR(svc->input));
967                 goto err_put_device;
968         }
969
970         svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
971                                                 gb_svc_request_handler);
972         if (IS_ERR(svc->connection)) {
973                 dev_err(&svc->dev, "failed to create connection: %ld\n",
974                                 PTR_ERR(svc->connection));
975                 goto err_free_input;
976         }
977
978         gb_connection_set_data(svc->connection, svc);
979
980         return svc;
981
982 err_free_input:
983         input_free_device(svc->input);
984 err_put_device:
985         put_device(&svc->dev);
986         return NULL;
987 }
988
989 int gb_svc_add(struct gb_svc *svc)
990 {
991         int ret;
992
993         /*
994          * The SVC protocol is currently driven by the SVC, so the SVC device
995          * is added from the connection request handler when enough
996          * information has been received.
997          */
998         ret = gb_connection_enable(svc->connection);
999         if (ret)
1000                 return ret;
1001
1002         return 0;
1003 }
1004
/*
 * Disable and remove every interface still listed on the host device;
 * used during SVC teardown.
 */
static void gb_svc_remove_interfaces(struct gb_svc *svc)
{
        struct gb_interface *intf, *tmp;

        /* _safe variant: removal presumably unlinks intf from the list */
        list_for_each_entry_safe(intf, tmp, &svc->hd->interfaces, links) {
                gb_interface_disable(intf);
                gb_interface_remove(intf);
        }
}
1014
/*
 * Tear down the SVC: disable the connection, unregister the devices if
 * they were registered, drain deferred work, and remove all interfaces.
 * The structure itself is freed later, when gb_svc_put() drops the last
 * reference.
 */
void gb_svc_del(struct gb_svc *svc)
{
        /* Stop incoming requests before unregistering anything. */
        gb_connection_disable(svc->connection);

        /*
         * The SVC device and input device may have been registered
         * from the request handler.
         */
        if (device_is_registered(&svc->dev)) {
                gb_svc_watchdog_destroy(svc);
                input_unregister_device(svc->input);
                device_del(&svc->dev);
        }

        /* Wait for any deferred requests still queued on the workqueue. */
        flush_workqueue(svc->wq);

        gb_svc_remove_interfaces(svc);
}
1033
/*
 * Drop a reference on the SVC; the structure is freed via the device
 * release callback (gb_svc_release()) when the last reference goes away.
 */
void gb_svc_put(struct gb_svc *svc)
{
        put_device(&svc->dev);
}