greybus: interface: implement generic mode-switch functionality
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/input.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
16 #define SVC_KEY_ARA_BUTTON      KEY_A
17
18 #define SVC_INTF_EJECT_TIMEOUT          9000
19 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
20
21 struct gb_svc_deferred_request {
22         struct work_struct work;
23         struct gb_operation *operation;
24 };
25
26
27 static int gb_svc_queue_deferred_request(struct gb_operation *operation);
28
29 static ssize_t endo_id_show(struct device *dev,
30                         struct device_attribute *attr, char *buf)
31 {
32         struct gb_svc *svc = to_gb_svc(dev);
33
34         return sprintf(buf, "0x%04x\n", svc->endo_id);
35 }
36 static DEVICE_ATTR_RO(endo_id);
37
38 static ssize_t ap_intf_id_show(struct device *dev,
39                         struct device_attribute *attr, char *buf)
40 {
41         struct gb_svc *svc = to_gb_svc(dev);
42
43         return sprintf(buf, "%u\n", svc->ap_intf_id);
44 }
45 static DEVICE_ATTR_RO(ap_intf_id);
46
47
48 // FIXME
49 // This is a hack, we need to do this "right" and clean the interface up
50 // properly, not just forcibly yank the thing out of the system and hope for the
51 // best.  But for now, people want their modules to come out without having to
52 // throw the thing to the ground or get out a screwdriver.
53 static ssize_t intf_eject_store(struct device *dev,
54                                 struct device_attribute *attr, const char *buf,
55                                 size_t len)
56 {
57         struct gb_svc *svc = to_gb_svc(dev);
58         unsigned short intf_id;
59         int ret;
60
61         ret = kstrtou16(buf, 10, &intf_id);
62         if (ret < 0)
63                 return ret;
64
65         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
66
67         ret = gb_svc_intf_eject(svc, intf_id);
68         if (ret < 0)
69                 return ret;
70
71         return len;
72 }
73 static DEVICE_ATTR_WO(intf_eject);
74
75 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
76                              char *buf)
77 {
78         struct gb_svc *svc = to_gb_svc(dev);
79
80         return sprintf(buf, "%s\n",
81                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
82 }
83
84 static ssize_t watchdog_store(struct device *dev,
85                               struct device_attribute *attr, const char *buf,
86                               size_t len)
87 {
88         struct gb_svc *svc = to_gb_svc(dev);
89         int retval;
90         bool user_request;
91
92         retval = strtobool(buf, &user_request);
93         if (retval)
94                 return retval;
95
96         if (user_request)
97                 retval = gb_svc_watchdog_enable(svc);
98         else
99                 retval = gb_svc_watchdog_disable(svc);
100         if (retval)
101                 return retval;
102         return len;
103 }
104 static DEVICE_ATTR_RW(watchdog);
105
106 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
107 {
108         struct gb_svc_pwrmon_rail_count_get_response response;
109         int ret;
110
111         ret = gb_operation_sync(svc->connection,
112                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
113                                 &response, sizeof(response));
114         if (ret) {
115                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
116                 return ret;
117         }
118
119         *value = response.rail_count;
120
121         return 0;
122 }
123
124 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
125                 struct gb_svc_pwrmon_rail_names_get_response *response,
126                 size_t bufsize)
127 {
128         int ret;
129
130         ret = gb_operation_sync(svc->connection,
131                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
132                                 response, bufsize);
133         if (ret) {
134                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
135                 return ret;
136         }
137
138         return 0;
139 }
140
141 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
142                                     u8 measurement_type, u32 *value)
143 {
144         struct gb_svc_pwrmon_sample_get_request request;
145         struct gb_svc_pwrmon_sample_get_response response;
146         int ret;
147
148         request.rail_id = rail_id;
149         request.measurement_type = measurement_type;
150
151         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
152                                 &request, sizeof(request),
153                                 &response, sizeof(response));
154         if (ret) {
155                 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
156                 return ret;
157         }
158
159         if (response.result) {
160                 dev_err(&svc->dev,
161                         "UniPro error while getting rail power sample (%d %d): %d\n",
162                         rail_id, measurement_type, response.result);
163                 switch (response.result) {
164                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
165                         return -EINVAL;
166                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
167                         return -ENOMSG;
168                 default:
169                         return -EREMOTEIO;
170                 }
171         }
172
173         *value = le32_to_cpu(response.measurement);
174
175         return 0;
176 }
177
178 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
179                                   u8 measurement_type, u32 *value)
180 {
181         struct gb_svc_pwrmon_intf_sample_get_request request;
182         struct gb_svc_pwrmon_intf_sample_get_response response;
183         int ret;
184
185         request.intf_id = intf_id;
186         request.measurement_type = measurement_type;
187
188         ret = gb_operation_sync(svc->connection,
189                                 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
190                                 &request, sizeof(request),
191                                 &response, sizeof(response));
192         if (ret) {
193                 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
194                 return ret;
195         }
196
197         if (response.result) {
198                 dev_err(&svc->dev,
199                         "UniPro error while getting intf power sample (%d %d): %d\n",
200                         intf_id, measurement_type, response.result);
201                 switch (response.result) {
202                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
203                         return -EINVAL;
204                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
205                         return -ENOMSG;
206                 default:
207                         return -EREMOTEIO;
208                 }
209         }
210
211         *value = le32_to_cpu(response.measurement);
212
213         return 0;
214 }
215
/* sysfs attributes published for the SVC device (exported via svc_groups). */
static struct attribute *svc_attrs[] = {
        &dev_attr_endo_id.attr,
        &dev_attr_ap_intf_id.attr,
        &dev_attr_intf_eject.attr,
        &dev_attr_watchdog.attr,
        NULL,
};
ATTRIBUTE_GROUPS(svc);
224
225 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
226 {
227         struct gb_svc_intf_device_id_request request;
228
229         request.intf_id = intf_id;
230         request.device_id = device_id;
231
232         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
233                                  &request, sizeof(request), NULL, 0);
234 }
235
/*
 * Ask the SVC to physically eject the module carrying interface @intf_id.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
        struct gb_svc_intf_eject_request request;
        int ret;

        request.intf_id = intf_id;

        /*
         * The pulse width for module release in svc is long so we need to
         * increase the timeout so the operation will not return too soon.
         */
        ret = gb_operation_sync_timeout(svc->connection,
                                        GB_SVC_TYPE_INTF_EJECT, &request,
                                        sizeof(request), NULL, 0,
                                        SVC_INTF_EJECT_TIMEOUT);
        if (ret) {
                dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
                return ret;
        }

        return 0;
}
258
259 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
260 {
261         struct gb_svc_intf_vsys_request request;
262         struct gb_svc_intf_vsys_response response;
263         int type, ret;
264
265         request.intf_id = intf_id;
266
267         if (enable)
268                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
269         else
270                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
271
272         ret = gb_operation_sync(svc->connection, type,
273                         &request, sizeof(request),
274                         &response, sizeof(response));
275         if (ret < 0)
276                 return ret;
277         if (response.result_code != GB_SVC_INTF_VSYS_OK)
278                 return -EREMOTEIO;
279         return 0;
280 }
281
282 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
283 {
284         struct gb_svc_intf_refclk_request request;
285         struct gb_svc_intf_refclk_response response;
286         int type, ret;
287
288         request.intf_id = intf_id;
289
290         if (enable)
291                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
292         else
293                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
294
295         ret = gb_operation_sync(svc->connection, type,
296                         &request, sizeof(request),
297                         &response, sizeof(response));
298         if (ret < 0)
299                 return ret;
300         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
301                 return -EREMOTEIO;
302         return 0;
303 }
304
305 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
306 {
307         struct gb_svc_intf_unipro_request request;
308         struct gb_svc_intf_unipro_response response;
309         int type, ret;
310
311         request.intf_id = intf_id;
312
313         if (enable)
314                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
315         else
316                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
317
318         ret = gb_operation_sync(svc->connection, type,
319                         &request, sizeof(request),
320                         &response, sizeof(response));
321         if (ret < 0)
322                 return ret;
323         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
324                 return -EREMOTEIO;
325         return 0;
326 }
327
328 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
329 {
330         struct gb_svc_intf_activate_request request;
331         struct gb_svc_intf_activate_response response;
332         int ret;
333
334         request.intf_id = intf_id;
335
336         ret = gb_operation_sync_timeout(svc->connection,
337                         GB_SVC_TYPE_INTF_ACTIVATE,
338                         &request, sizeof(request),
339                         &response, sizeof(response),
340                         SVC_INTF_ACTIVATE_TIMEOUT);
341         if (ret < 0)
342                 return ret;
343         if (response.status != GB_SVC_OP_SUCCESS) {
344                 dev_err(&svc->dev, "failed to activate interface %u: %u\n",
345                                 intf_id, response.status);
346                 return -EREMOTEIO;
347         }
348
349         *intf_type = response.intf_type;
350
351         return 0;
352 }
353
354 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
355                         u32 *value)
356 {
357         struct gb_svc_dme_peer_get_request request;
358         struct gb_svc_dme_peer_get_response response;
359         u16 result;
360         int ret;
361
362         request.intf_id = intf_id;
363         request.attr = cpu_to_le16(attr);
364         request.selector = cpu_to_le16(selector);
365
366         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
367                                 &request, sizeof(request),
368                                 &response, sizeof(response));
369         if (ret) {
370                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
371                                 intf_id, attr, selector, ret);
372                 return ret;
373         }
374
375         result = le16_to_cpu(response.result_code);
376         if (result) {
377                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
378                                 intf_id, attr, selector, result);
379                 return -EREMOTEIO;
380         }
381
382         if (value)
383                 *value = le32_to_cpu(response.attr_value);
384
385         return 0;
386 }
387 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
388
389 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
390                         u32 value)
391 {
392         struct gb_svc_dme_peer_set_request request;
393         struct gb_svc_dme_peer_set_response response;
394         u16 result;
395         int ret;
396
397         request.intf_id = intf_id;
398         request.attr = cpu_to_le16(attr);
399         request.selector = cpu_to_le16(selector);
400         request.value = cpu_to_le32(value);
401
402         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
403                                 &request, sizeof(request),
404                                 &response, sizeof(response));
405         if (ret) {
406                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
407                                 intf_id, attr, selector, value, ret);
408                 return ret;
409         }
410
411         result = le16_to_cpu(response.result_code);
412         if (result) {
413                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
414                                 intf_id, attr, selector, value, result);
415                 return -EREMOTEIO;
416         }
417
418         return 0;
419 }
420 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
421
422 int gb_svc_connection_create(struct gb_svc *svc,
423                                 u8 intf1_id, u16 cport1_id,
424                                 u8 intf2_id, u16 cport2_id,
425                                 u8 cport_flags)
426 {
427         struct gb_svc_conn_create_request request;
428
429         request.intf1_id = intf1_id;
430         request.cport1_id = cpu_to_le16(cport1_id);
431         request.intf2_id = intf2_id;
432         request.cport2_id = cpu_to_le16(cport2_id);
433         request.tc = 0;         /* TC0 */
434         request.flags = cport_flags;
435
436         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
437                                  &request, sizeof(request), NULL, 0);
438 }
439 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
440
/*
 * Quiesce the connection between the given cports.
 *
 * FIXME: not implemented yet -- currently this only logs the request.
 */
void gb_svc_connection_quiescing(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
                                        u8 intf2_id, u16 cport2_id)
{
        /* FIXME: implement */

        dev_dbg(&svc->dev, "%s - (%u:%u %u:%u)\n", __func__,
                                intf1_id, cport1_id, intf2_id, cport2_id);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_quiescing);
450
451 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
452                                u8 intf2_id, u16 cport2_id)
453 {
454         struct gb_svc_conn_destroy_request request;
455         struct gb_connection *connection = svc->connection;
456         int ret;
457
458         request.intf1_id = intf1_id;
459         request.cport1_id = cpu_to_le16(cport1_id);
460         request.intf2_id = intf2_id;
461         request.cport2_id = cpu_to_le16(cport2_id);
462
463         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
464                                 &request, sizeof(request), NULL, 0);
465         if (ret) {
466                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
467                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
468         }
469 }
470 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
471
472 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
473                            u32 strobe_delay, u32 refclk)
474 {
475         struct gb_connection *connection = svc->connection;
476         struct gb_svc_timesync_enable_request request;
477
478         request.count = count;
479         request.frame_time = cpu_to_le64(frame_time);
480         request.strobe_delay = cpu_to_le32(strobe_delay);
481         request.refclk = cpu_to_le32(refclk);
482         return gb_operation_sync(connection,
483                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
484                                  &request, sizeof(request), NULL, 0);
485 }
486 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
487
488 int gb_svc_timesync_disable(struct gb_svc *svc)
489 {
490         struct gb_connection *connection = svc->connection;
491
492         return gb_operation_sync(connection,
493                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
494                                  NULL, 0, NULL, 0);
495 }
496 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
497
/*
 * Fetch the authoritative frame times recorded by the SVC for each
 * timesync strobe.  @frame_time must point to an array with room for
 * GB_TIMESYNC_MAX_STROBES entries; all of them are written here.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
{
        struct gb_connection *connection = svc->connection;
        struct gb_svc_timesync_authoritative_response response;
        int ret, i;

        ret = gb_operation_sync(connection,
                                GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
                                &response, sizeof(response));
        if (ret < 0)
                return ret;

        /* Frame times are little-endian on the wire. */
        for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
                frame_time[i] = le64_to_cpu(response.frame_time[i]);
        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
515
516 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
517 {
518         struct gb_connection *connection = svc->connection;
519         struct gb_svc_timesync_ping_response response;
520         int ret;
521
522         ret = gb_operation_sync(connection,
523                                 GB_SVC_TYPE_TIMESYNC_PING,
524                                 NULL, 0,
525                                 &response, sizeof(response));
526         if (ret < 0)
527                 return ret;
528
529         *frame_time = le64_to_cpu(response.frame_time);
530         return 0;
531 }
532 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
533
534 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
535 {
536         struct gb_connection *connection = svc->connection;
537         struct gb_svc_timesync_wake_pins_acquire_request request;
538
539         request.strobe_mask = cpu_to_le32(strobe_mask);
540         return gb_operation_sync(connection,
541                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
542                                  &request, sizeof(request),
543                                  NULL, 0);
544 }
545 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
546
547 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
548 {
549         struct gb_connection *connection = svc->connection;
550
551         return gb_operation_sync(connection,
552                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
553                                  NULL, 0, NULL, 0);
554 }
555 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
556
557 /* Creates bi-directional routes between the devices */
558 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
559                                u8 intf2_id, u8 dev2_id)
560 {
561         struct gb_svc_route_create_request request;
562
563         request.intf1_id = intf1_id;
564         request.dev1_id = dev1_id;
565         request.intf2_id = intf2_id;
566         request.dev2_id = dev2_id;
567
568         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
569                                  &request, sizeof(request), NULL, 0);
570 }
571
572 /* Destroys bi-directional routes between the devices */
573 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
574 {
575         struct gb_svc_route_destroy_request request;
576         int ret;
577
578         request.intf1_id = intf1_id;
579         request.intf2_id = intf2_id;
580
581         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
582                                 &request, sizeof(request), NULL, 0);
583         if (ret) {
584                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
585                                 intf1_id, intf2_id, ret);
586         }
587 }
588
/*
 * Request a UniPro power-mode change for interface @intf_id.
 *
 * Returns a negative errno if the operation itself fails; otherwise
 * returns the SVC's result_code converted to CPU endianness (0 on
 * success, a positive protocol-defined code otherwise).  Callers must
 * therefore check for both negative and positive non-zero returns.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
                               u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
                               u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
                               u8 flags, u32 quirks)
{
        struct gb_svc_intf_set_pwrm_request request;
        struct gb_svc_intf_set_pwrm_response response;
        int ret;

        request.intf_id = intf_id;
        request.hs_series = hs_series;
        request.tx_mode = tx_mode;
        request.tx_gear = tx_gear;
        request.tx_nlanes = tx_nlanes;
        request.rx_mode = rx_mode;
        request.rx_gear = rx_gear;
        request.rx_nlanes = rx_nlanes;
        request.flags = flags;
        request.quirks = cpu_to_le32(quirks);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret < 0)
                return ret;

        return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
618
/*
 * Ping the SVC to verify it is responsive.  Uses twice the default
 * operation timeout.
 */
int gb_svc_ping(struct gb_svc *svc)
{
        return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
                                         NULL, 0, NULL, 0,
                                         GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
EXPORT_SYMBOL_GPL(gb_svc_ping);
626
/*
 * SVC Version request handler: negotiate the protocol version.  The
 * SVC announces its (major, minor); a major newer than ours is
 * refused, otherwise the version we will speak is recorded and echoed
 * back in the response.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = gb_connection_get_data(connection);
        struct gb_svc_version_request *request;
        struct gb_svc_version_response *response;

        /* Reject malformed (short) requests. */
        if (op->request->payload_size < sizeof(*request)) {
                dev_err(&svc->dev, "short version request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        if (request->major > GB_SVC_VERSION_MAJOR) {
                dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
                                request->major, GB_SVC_VERSION_MAJOR);
                return -ENOTSUPP;
        }

        /* Remember the negotiated version for the rest of the session. */
        svc->protocol_major = request->major;
        svc->protocol_minor = request->minor;

        if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
                return -ENOMEM;

        response = op->response->payload;
        response->major = svc->protocol_major;
        response->minor = svc->protocol_minor;

        return 0;
}
661
662 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
663                                         size_t len, loff_t *offset)
664 {
665         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
666         struct gb_svc *svc = pwrmon_rails->svc;
667         int ret, desc;
668         u32 value;
669         char buff[16];
670
671         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
672                                        GB_SVC_PWRMON_TYPE_VOL, &value);
673         if (ret) {
674                 dev_err(&svc->dev,
675                         "failed to get voltage sample %u: %d\n",
676                         pwrmon_rails->id, ret);
677                 return ret;
678         }
679
680         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
681
682         return simple_read_from_buffer(buf, len, offset, buff, desc);
683 }
684
685 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
686                                         size_t len, loff_t *offset)
687 {
688         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
689         struct gb_svc *svc = pwrmon_rails->svc;
690         int ret, desc;
691         u32 value;
692         char buff[16];
693
694         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
695                                        GB_SVC_PWRMON_TYPE_CURR, &value);
696         if (ret) {
697                 dev_err(&svc->dev,
698                         "failed to get current sample %u: %d\n",
699                         pwrmon_rails->id, ret);
700                 return ret;
701         }
702
703         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
704
705         return simple_read_from_buffer(buf, len, offset, buff, desc);
706 }
707
708 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
709                                       size_t len, loff_t *offset)
710 {
711         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
712         struct gb_svc *svc = pwrmon_rails->svc;
713         int ret, desc;
714         u32 value;
715         char buff[16];
716
717         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
718                                        GB_SVC_PWRMON_TYPE_PWR, &value);
719         if (ret) {
720                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
721                         pwrmon_rails->id, ret);
722                 return ret;
723         }
724
725         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
726
727         return simple_read_from_buffer(buf, len, offset, buff, desc);
728 }
729
/* Read-only file operations for the per-rail pwrmon debugfs files. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
        .read           = pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
        .read           = pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
        .read           = pwr_debugfs_power_read,
};
741
/*
 * Populate the "pwrmon" debugfs directory: query the rail count and
 * names from the SVC, then create voltage/current/power files for
 * each rail.  Best-effort: on any failure everything created here is
 * torn down and the function returns silently.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
        int i;
        size_t bufsize;
        struct dentry *dent;
        struct gb_svc_pwrmon_rail_names_get_response *rail_names;
        u8 rail_count;

        dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
        if (IS_ERR_OR_NULL(dent))
                return;

        if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
                goto err_pwrmon_debugfs;

        /* Sanity-bound the count before sizing allocations from it. */
        if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
                goto err_pwrmon_debugfs;

        bufsize = GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

        rail_names = kzalloc(bufsize, GFP_KERNEL);
        if (!rail_names)
                goto err_pwrmon_debugfs;

        /* Per-rail bookkeeping; freed again in gb_svc_debugfs_exit(). */
        svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
                                    GFP_KERNEL);
        if (!svc->pwrmon_rails)
                goto err_pwrmon_debugfs_free;

        if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
                goto err_pwrmon_debugfs_free;

        for (i = 0; i < rail_count; i++) {
                struct dentry *dir;
                struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
                char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

                /* Rail name from the response table names the directory. */
                snprintf(fname, sizeof(fname), "%s",
                         (char *)&rail_names->name[i]);

                rail->id = i;
                rail->svc = svc;

                dir = debugfs_create_dir(fname, dent);
                debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_voltage_fops);
                debugfs_create_file("current_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_current_fops);
                debugfs_create_file("power_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_power_fops);
        }

        /* Names were only needed to label the directories. */
        kfree(rail_names);
        return;

err_pwrmon_debugfs_free:
        kfree(rail_names);
        kfree(svc->pwrmon_rails);
        svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
        debugfs_remove(dent);
}
805
/*
 * Create the per-SVC debugfs directory (named after the svc device)
 * under the greybus debugfs root, then populate the pwrmon entries.
 * NOTE(review): the dentry is not error-checked here; the pwrmon
 * helper guards against a bad parent itself -- confirm this is the
 * intended division of responsibility.
 */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
        svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
                                                 gb_debugfs_get());
        gb_svc_pwrmon_debugfs_init(svc);
}
812
/* Remove the per-SVC debugfs tree and free the pwrmon rail bookkeeping. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
        debugfs_remove_recursive(svc->debugfs_dentry);
        /* Allocated in gb_svc_pwrmon_debugfs_init(); NULL-safe to free. */
        kfree(svc->pwrmon_rails);
        svc->pwrmon_rails = NULL;
}
819
/*
 * SVC Hello request handler: record the endo id and the AP's own
 * interface id from the request, register the svc device, its input
 * device and the watchdog (unwinding in reverse order on failure),
 * then defer the remaining bring-up work to the deferred-request
 * workqueue.
 */
static int gb_svc_hello(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = gb_connection_get_data(connection);
        struct gb_svc_hello_request *hello_request;
        int ret;

        /* Reject malformed (short) requests. */
        if (op->request->payload_size < sizeof(*hello_request)) {
                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*hello_request));
                return -EINVAL;
        }

        hello_request = op->request->payload;
        svc->endo_id = le16_to_cpu(hello_request->endo_id);
        svc->ap_intf_id = hello_request->interface_id;

        ret = device_add(&svc->dev);
        if (ret) {
                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
                return ret;
        }

        ret = input_register_device(svc->input);
        if (ret) {
                dev_err(&svc->dev, "failed to register input: %d\n", ret);
                device_del(&svc->dev);
                return ret;
        }

        ret = gb_svc_watchdog_create(svc);
        if (ret) {
                dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
                /* Unwind the registrations done above, newest first. */
                input_unregister_device(svc->input);
                device_del(&svc->dev);
                return ret;
        }

        gb_svc_debugfs_init(svc);

        /* Remaining bring-up runs from the deferred-request workqueue. */
        return gb_svc_queue_deferred_request(op);
}
863
864 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
865                                                         u8 intf_id)
866 {
867         struct gb_host_device *hd = svc->hd;
868         struct gb_module *module;
869         size_t num_interfaces;
870         u8 module_id;
871
872         list_for_each_entry(module, &hd->modules, hd_node) {
873                 module_id = module->module_id;
874                 num_interfaces = module->num_interfaces;
875
876                 if (intf_id >= module_id &&
877                                 intf_id < module_id + num_interfaces) {
878                         return module->interfaces[intf_id - module_id];
879                 }
880         }
881
882         return NULL;
883 }
884
885 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
886 {
887         struct gb_host_device *hd = svc->hd;
888         struct gb_module *module;
889
890         list_for_each_entry(module, &hd->modules, hd_node) {
891                 if (module->module_id == module_id)
892                         return module;
893         }
894
895         return NULL;
896 }
897
/*
 * Deferred portion of SVC_HELLO handling, run from the svc workqueue
 * after the hello request itself has been handled.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					GB_SVC_UNIPRO_HS_SERIES_A,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					0, 0);

	/* Best effort: log a failure and carry on with bring-up. */
	if (ret)
		dev_warn(&svc->dev,
			"power mode change failed on AP to switch link: %d\n",
			ret);
}
926
927 static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
928 {
929         struct gb_svc_intf_hotplug_request *request;
930         struct gb_connection *connection = operation->connection;
931         struct gb_svc *svc = gb_connection_get_data(connection);
932         struct gb_host_device *hd = connection->hd;
933         struct gb_module *module;
934         u8 intf_id;
935         int ret;
936
937         /* The request message size has already been verified. */
938         request = operation->request->payload;
939         intf_id = request->intf_id;
940
941         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
942
943         /* All modules are considered 1x2 for now */
944         module = gb_svc_module_lookup(svc, intf_id);
945         if (module) {
946                 /* legacy mode switch */
947                 return gb_interface_mailbox_event(module->interfaces[0], 0,
948                                                 GB_SVC_INTF_MAILBOX_GREYBUS);
949         }
950
951         module = gb_module_create(hd, intf_id, 1);
952         if (!module) {
953                 dev_err(&svc->dev, "failed to create module\n");
954                 return;
955         }
956
957         ret = gb_module_add(module);
958         if (ret) {
959                 gb_module_put(module);
960                 return;
961         }
962
963         list_add(&module->hd_node, &hd->modules);
964 }
965
966 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
967 {
968         struct gb_svc *svc = gb_connection_get_data(operation->connection);
969         struct gb_svc_intf_hot_unplug_request *request;
970         struct gb_module *module;
971         u8 intf_id;
972
973         /* The request message size has already been verified. */
974         request = operation->request->payload;
975         intf_id = request->intf_id;
976
977         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
978
979         /* All modules are considered 1x2 for now */
980         module = gb_svc_module_lookup(svc, intf_id);
981         if (!module) {
982                 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
983                                 intf_id);
984                 return;
985         }
986
987         module->disconnected = true;
988
989         gb_module_del(module);
990         list_del(&module->hd_node);
991         gb_module_put(module);
992 }
993
994 static void gb_svc_process_module_inserted(struct gb_operation *operation)
995 {
996         struct gb_svc_module_inserted_request *request;
997         struct gb_connection *connection = operation->connection;
998         struct gb_svc *svc = gb_connection_get_data(connection);
999         struct gb_host_device *hd = svc->hd;
1000         struct gb_module *module;
1001         size_t num_interfaces;
1002         u8 module_id;
1003         u16 flags;
1004         int ret;
1005
1006         /* The request message size has already been verified. */
1007         request = operation->request->payload;
1008         module_id = request->primary_intf_id;
1009         num_interfaces = request->intf_count;
1010         flags = le16_to_cpu(request->flags);
1011
1012         dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
1013                         __func__, module_id, num_interfaces, flags);
1014
1015         if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
1016                 dev_warn(&svc->dev, "no primary interface detected on module %u\n",
1017                                 module_id);
1018         }
1019
1020         module = gb_svc_module_lookup(svc, module_id);
1021         if (module) {
1022                 dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
1023                                 module_id);
1024                 return;
1025         }
1026
1027         module = gb_module_create(hd, module_id, num_interfaces);
1028         if (!module) {
1029                 dev_err(&svc->dev, "failed to create module\n");
1030                 return;
1031         }
1032
1033         ret = gb_module_add(module);
1034         if (ret) {
1035                 gb_module_put(module);
1036                 return;
1037         }
1038
1039         list_add(&module->hd_node, &hd->modules);
1040 }
1041
1042 static void gb_svc_process_module_removed(struct gb_operation *operation)
1043 {
1044         struct gb_svc_module_removed_request *request;
1045         struct gb_connection *connection = operation->connection;
1046         struct gb_svc *svc = gb_connection_get_data(connection);
1047         struct gb_module *module;
1048         u8 module_id;
1049
1050         /* The request message size has already been verified. */
1051         request = operation->request->payload;
1052         module_id = request->primary_intf_id;
1053
1054         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1055
1056         module = gb_svc_module_lookup(svc, module_id);
1057         if (!module) {
1058                 dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1059                                 module_id);
1060                 return;
1061         }
1062
1063         module->disconnected = true;
1064
1065         gb_module_del(module);
1066         list_del(&module->hd_node);
1067         gb_module_put(module);
1068 }
1069
1070 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1071 {
1072         struct gb_svc_intf_mailbox_event_request *request;
1073         struct gb_connection *connection = operation->connection;
1074         struct gb_svc *svc = gb_connection_get_data(connection);
1075         struct gb_interface *intf;
1076         u8 intf_id;
1077         u16 result_code;
1078         u32 mailbox;
1079
1080         /* The request message size has already been verified. */
1081         request = operation->request->payload;
1082         intf_id = request->intf_id;
1083         result_code = le16_to_cpu(request->result_code);
1084         mailbox = le32_to_cpu(request->mailbox);
1085
1086         dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1087                         __func__, intf_id, result_code, mailbox);
1088
1089         intf = gb_svc_interface_lookup(svc, intf_id);
1090         if (!intf) {
1091                 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1092                 return;
1093         }
1094
1095         gb_interface_mailbox_event(intf, result_code, mailbox);
1096 }
1097
1098 static void gb_svc_process_deferred_request(struct work_struct *work)
1099 {
1100         struct gb_svc_deferred_request *dr;
1101         struct gb_operation *operation;
1102         struct gb_svc *svc;
1103         u8 type;
1104
1105         dr = container_of(work, struct gb_svc_deferred_request, work);
1106         operation = dr->operation;
1107         svc = gb_connection_get_data(operation->connection);
1108         type = operation->request->header->type;
1109
1110         switch (type) {
1111         case GB_SVC_TYPE_SVC_HELLO:
1112                 gb_svc_process_hello_deferred(operation);
1113                 break;
1114         case GB_SVC_TYPE_INTF_HOTPLUG:
1115                 gb_svc_process_intf_hotplug(operation);
1116                 break;
1117         case GB_SVC_TYPE_INTF_HOT_UNPLUG:
1118                 gb_svc_process_intf_hot_unplug(operation);
1119                 break;
1120         case GB_SVC_TYPE_MODULE_INSERTED:
1121                 gb_svc_process_module_inserted(operation);
1122                 break;
1123         case GB_SVC_TYPE_MODULE_REMOVED:
1124                 gb_svc_process_module_removed(operation);
1125                 break;
1126         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1127                 gb_svc_process_intf_mailbox_event(operation);
1128                 break;
1129         default:
1130                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1131         }
1132
1133         gb_operation_put(operation);
1134         kfree(dr);
1135 }
1136
1137 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1138 {
1139         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1140         struct gb_svc_deferred_request *dr;
1141
1142         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1143         if (!dr)
1144                 return -ENOMEM;
1145
1146         gb_operation_get(operation);
1147
1148         dr->operation = operation;
1149         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1150
1151         queue_work(svc->wq, &dr->work);
1152
1153         return 0;
1154 }
1155
1156 /*
1157  * Bringing up a module can be time consuming, as that may require lots of
1158  * initialization on the module side. Over that, we may also need to download
1159  * the firmware first and flash that on the module.
1160  *
1161  * In order not to make other svc events wait for all this to finish,
1162  * handle most of module hotplug stuff outside of the hotplug callback, with
1163  * help of a workqueue.
1164  */
1165 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
1166 {
1167         struct gb_svc *svc = gb_connection_get_data(op->connection);
1168         struct gb_svc_intf_hotplug_request *request;
1169
1170         if (op->request->payload_size < sizeof(*request)) {
1171                 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
1172                                 op->request->payload_size, sizeof(*request));
1173                 return -EINVAL;
1174         }
1175
1176         request = op->request->payload;
1177
1178         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1179
1180         return gb_svc_queue_deferred_request(op);
1181 }
1182
1183 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
1184 {
1185         struct gb_svc *svc = gb_connection_get_data(op->connection);
1186         struct gb_svc_intf_hot_unplug_request *request;
1187
1188         if (op->request->payload_size < sizeof(*request)) {
1189                 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
1190                                 op->request->payload_size, sizeof(*request));
1191                 return -EINVAL;
1192         }
1193
1194         request = op->request->payload;
1195
1196         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1197
1198         return gb_svc_queue_deferred_request(op);
1199 }
1200
/*
 * Handle an interface-reset request from the SVC.
 *
 * Currently only validates the request and extracts the interface id;
 * the actual reset is not implemented yet (see FIXME below).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* Deliberately unused for now; kept for the pending implementation. */
	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1221
1222 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
1223 {
1224         switch (key_code) {
1225         case GB_KEYCODE_ARA:
1226                 *code = SVC_KEY_ARA_BUTTON;
1227                 break;
1228         default:
1229                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
1230                 return -EINVAL;
1231         }
1232
1233         return 0;
1234 }
1235
1236 static int gb_svc_key_event_recv(struct gb_operation *op)
1237 {
1238         struct gb_svc *svc = gb_connection_get_data(op->connection);
1239         struct gb_message *request = op->request;
1240         struct gb_svc_key_event_request *key;
1241         u16 code;
1242         u8 event;
1243         int ret;
1244
1245         if (request->payload_size < sizeof(*key)) {
1246                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
1247                          request->payload_size, sizeof(*key));
1248                 return -EINVAL;
1249         }
1250
1251         key = request->payload;
1252
1253         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
1254         if (ret < 0)
1255                 return ret;
1256
1257         event = key->key_event;
1258         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
1259                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
1260                 return -EINVAL;
1261         }
1262
1263         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
1264         input_sync(svc->input);
1265
1266         return 0;
1267 }
1268
1269 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1270 {
1271         struct gb_svc *svc = gb_connection_get_data(op->connection);
1272         struct gb_svc_module_inserted_request *request;
1273
1274         if (op->request->payload_size < sizeof(*request)) {
1275                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1276                                 op->request->payload_size, sizeof(*request));
1277                 return -EINVAL;
1278         }
1279
1280         request = op->request->payload;
1281
1282         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1283                         request->primary_intf_id);
1284
1285         return gb_svc_queue_deferred_request(op);
1286 }
1287
1288 static int gb_svc_module_removed_recv(struct gb_operation *op)
1289 {
1290         struct gb_svc *svc = gb_connection_get_data(op->connection);
1291         struct gb_svc_module_removed_request *request;
1292
1293         if (op->request->payload_size < sizeof(*request)) {
1294                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1295                                 op->request->payload_size, sizeof(*request));
1296                 return -EINVAL;
1297         }
1298
1299         request = op->request->payload;
1300
1301         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1302                         request->primary_intf_id);
1303
1304         return gb_svc_queue_deferred_request(op);
1305 }
1306
1307 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1308 {
1309         struct gb_svc *svc = gb_connection_get_data(op->connection);
1310         struct gb_svc_intf_mailbox_event_request *request;
1311
1312         if (op->request->payload_size < sizeof(*request)) {
1313                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1314                                 op->request->payload_size, sizeof(*request));
1315                 return -EINVAL;
1316         }
1317
1318         request = op->request->payload;
1319
1320         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1321
1322         return gb_svc_queue_deferred_request(op);
1323 }
1324
/*
 * Top-level request handler for the SVC connection: enforce the initial
 * request ordering (version, then hello, then anything else) and
 * dispatch to the per-request handlers.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; state only advances on successful version/hello handling. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1393
1394 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
1395 {
1396         struct input_dev *input_dev;
1397
1398         input_dev = input_allocate_device();
1399         if (!input_dev)
1400                 return ERR_PTR(-ENOMEM);
1401
1402         input_dev->name = dev_name(&svc->dev);
1403         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
1404                                     input_dev->name);
1405         if (!svc->input_phys)
1406                 goto err_free_input;
1407
1408         input_dev->phys = svc->input_phys;
1409         input_dev->dev.parent = &svc->dev;
1410
1411         input_set_drvdata(input_dev, svc);
1412
1413         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
1414
1415         return input_dev;
1416
1417 err_free_input:
1418         input_free_device(svc->input);
1419         return ERR_PTR(-ENOMEM);
1420 }
1421
/*
 * Device release callback for the SVC device; runs when the last
 * reference is dropped (gb_svc_put()). Frees everything allocated in
 * gb_svc_create().
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may never have been created if setup failed early. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
1433
/* Device type for the SVC device; release frees the embedding gb_svc. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1438
/*
 * Allocate and initialize an SVC structure for host device @hd.
 *
 * Sets up the deferred-request workqueue, the (not yet registered) SVC
 * device, the input device and the static SVC connection. The SVC device
 * itself is only registered later, from the SVC_HELLO handler.
 * Returns the new SVC or NULL on failure.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* One work item at a time (max_active = 1) for deferred requests. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	/* From here on, cleanup goes through put_device()/gb_svc_release(). */
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->input = gb_svc_input_create(svc);
	if (IS_ERR(svc->input)) {
		dev_err(&svc->dev, "failed to create input device: %ld\n",
			PTR_ERR(svc->input));
		goto err_put_device;
	}

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
				PTR_ERR(svc->connection));
		goto err_free_input;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_free_input:
	input_free_device(svc->input);
err_put_device:
	put_device(&svc->dev);
	return NULL;
}
1491
1492 int gb_svc_add(struct gb_svc *svc)
1493 {
1494         int ret;
1495
1496         /*
1497          * The SVC protocol is currently driven by the SVC, so the SVC device
1498          * is added from the connection request handler when enough
1499          * information has been received.
1500          */
1501         ret = gb_connection_enable(svc->connection);
1502         if (ret)
1503                 return ret;
1504
1505         return 0;
1506 }
1507
1508 static void gb_svc_remove_modules(struct gb_svc *svc)
1509 {
1510         struct gb_host_device *hd = svc->hd;
1511         struct gb_module *module, *tmp;
1512
1513         list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1514                 gb_module_del(module);
1515                 list_del(&module->hd_node);
1516                 gb_module_put(module);
1517         }
1518 }
1519
/*
 * Tear down the SVC: stop request processing, unregister whatever the
 * hello handler registered, drain deferred work, and remove all modules.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop incoming SVC requests before tearing anything down. */
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}
	/*
	 * NOTE(review): if hello never arrived, svc->input was allocated but
	 * never registered, and no path here appears to free it — verify.
	 */

	/* Let any already-queued deferred requests finish. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);
}
1539
/* Drop a reference to the SVC device; last ref triggers gb_svc_release(). */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}