4176e231b14a69bfedd12d6f896f1b637243ad48
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/input.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
/* Keycode reported for the Ara button (via svc->input). */
#define SVC_KEY_ARA_BUTTON      KEY_A

/*
 * Operation timeouts (ms), both longer than the default operation
 * timeout: eject drives a long release pulse (see gb_svc_intf_eject()),
 * and interface activation is given extra headroom as well.
 */
#define SVC_INTF_EJECT_TIMEOUT          9000
#define SVC_INTF_ACTIVATE_TIMEOUT       6000

/*
 * An incoming SVC request handed off to workqueue context for
 * processing (see gb_svc_queue_deferred_request()).
 */
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};


static int gb_svc_queue_deferred_request(struct gb_operation *operation);
28
29 static ssize_t endo_id_show(struct device *dev,
30                         struct device_attribute *attr, char *buf)
31 {
32         struct gb_svc *svc = to_gb_svc(dev);
33
34         return sprintf(buf, "0x%04x\n", svc->endo_id);
35 }
36 static DEVICE_ATTR_RO(endo_id);
37
38 static ssize_t ap_intf_id_show(struct device *dev,
39                         struct device_attribute *attr, char *buf)
40 {
41         struct gb_svc *svc = to_gb_svc(dev);
42
43         return sprintf(buf, "%u\n", svc->ap_intf_id);
44 }
45 static DEVICE_ATTR_RO(ap_intf_id);
46
47
48 // FIXME
49 // This is a hack, we need to do this "right" and clean the interface up
50 // properly, not just forcibly yank the thing out of the system and hope for the
51 // best.  But for now, people want their modules to come out without having to
52 // throw the thing to the ground or get out a screwdriver.
53 static ssize_t intf_eject_store(struct device *dev,
54                                 struct device_attribute *attr, const char *buf,
55                                 size_t len)
56 {
57         struct gb_svc *svc = to_gb_svc(dev);
58         unsigned short intf_id;
59         int ret;
60
61         ret = kstrtou16(buf, 10, &intf_id);
62         if (ret < 0)
63                 return ret;
64
65         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
66
67         ret = gb_svc_intf_eject(svc, intf_id);
68         if (ret < 0)
69                 return ret;
70
71         return len;
72 }
73 static DEVICE_ATTR_WO(intf_eject);
74
75 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
76                              char *buf)
77 {
78         struct gb_svc *svc = to_gb_svc(dev);
79
80         return sprintf(buf, "%s\n",
81                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
82 }
83
84 static ssize_t watchdog_store(struct device *dev,
85                               struct device_attribute *attr, const char *buf,
86                               size_t len)
87 {
88         struct gb_svc *svc = to_gb_svc(dev);
89         int retval;
90         bool user_request;
91
92         retval = strtobool(buf, &user_request);
93         if (retval)
94                 return retval;
95
96         if (user_request)
97                 retval = gb_svc_watchdog_enable(svc);
98         else
99                 retval = gb_svc_watchdog_disable(svc);
100         if (retval)
101                 return retval;
102         return len;
103 }
104 static DEVICE_ATTR_RW(watchdog);
105
106 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
107 {
108         struct gb_svc_pwrmon_rail_count_get_response response;
109         int ret;
110
111         ret = gb_operation_sync(svc->connection,
112                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
113                                 &response, sizeof(response));
114         if (ret) {
115                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
116                 return ret;
117         }
118
119         *value = response.rail_count;
120
121         return 0;
122 }
123
124 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
125                 struct gb_svc_pwrmon_rail_names_get_response *response,
126                 size_t bufsize)
127 {
128         int ret;
129
130         ret = gb_operation_sync(svc->connection,
131                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
132                                 response, bufsize);
133         if (ret) {
134                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
135                 return ret;
136         }
137
138         return 0;
139 }
140
/*
 * Read one power-monitor sample for rail @rail_id; @measurement_type
 * selects the quantity (voltage/current/power, GB_SVC_PWRMON_TYPE_*).
 *
 * Returns 0 with the CPU-endian measurement stored in *value, a
 * negative errno on transport failure, or a negative errno mapped from
 * a non-zero remote result code (-EINVAL/-ENOMSG/-EREMOTEIO).
 */
static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	/* Transport succeeded; map the SVC-reported result to an errno. */
	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
177
/*
 * Read one power-monitor sample for interface @intf_id; @measurement_type
 * selects the quantity (GB_SVC_PWRMON_TYPE_*).
 *
 * Returns 0 with the CPU-endian measurement stored in *value, a
 * negative errno on transport failure, or a negative errno mapped from
 * a non-zero remote result code (-EINVAL/-ENOMSG/-EREMOTEIO).
 */
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	/* Transport succeeded; map the SVC-reported result to an errno. */
	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
215
/* sysfs attributes exposed on the SVC device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
224
225 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
226 {
227         struct gb_svc_intf_device_id_request request;
228
229         request.intf_id = intf_id;
230         request.device_id = device_id;
231
232         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
233                                  &request, sizeof(request), NULL, 0);
234 }
235
/*
 * Ask the SVC to physically eject the module on interface @intf_id.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
258
259 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
260 {
261         struct gb_svc_intf_vsys_request request;
262         struct gb_svc_intf_vsys_response response;
263         int type, ret;
264
265         request.intf_id = intf_id;
266
267         if (enable)
268                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
269         else
270                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
271
272         ret = gb_operation_sync(svc->connection, type,
273                         &request, sizeof(request),
274                         &response, sizeof(response));
275         if (ret < 0)
276                 return ret;
277         if (response.result_code != GB_SVC_INTF_VSYS_OK)
278                 return -EREMOTEIO;
279         return 0;
280 }
281
282 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
283 {
284         struct gb_svc_intf_refclk_request request;
285         struct gb_svc_intf_refclk_response response;
286         int type, ret;
287
288         request.intf_id = intf_id;
289
290         if (enable)
291                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
292         else
293                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
294
295         ret = gb_operation_sync(svc->connection, type,
296                         &request, sizeof(request),
297                         &response, sizeof(response));
298         if (ret < 0)
299                 return ret;
300         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
301                 return -EREMOTEIO;
302         return 0;
303 }
304
305 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
306 {
307         struct gb_svc_intf_unipro_request request;
308         struct gb_svc_intf_unipro_response response;
309         int type, ret;
310
311         request.intf_id = intf_id;
312
313         if (enable)
314                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
315         else
316                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
317
318         ret = gb_operation_sync(svc->connection, type,
319                         &request, sizeof(request),
320                         &response, sizeof(response));
321         if (ret < 0)
322                 return ret;
323         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
324                 return -EREMOTEIO;
325         return 0;
326 }
327
/*
 * Activate interface @intf_id.  Uses the extended
 * SVC_INTF_ACTIVATE_TIMEOUT; on success stores the interface type
 * reported by the SVC in *intf_type.  Returns 0 on success, a negative
 * errno on transport failure, or -EREMOTEIO on a remote error status.
 */
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
			GB_SVC_TYPE_INTF_ACTIVATE,
			&request, sizeof(request),
			&response, sizeof(response),
			SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
				intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}
353
/*
 * Read DME attribute @attr (with @selector) from the peer on interface
 * @intf_id.  On success the attribute value is stored in *value; @value
 * may be NULL if the caller only cares about success.
 *
 * Returns 0 on success, a negative errno on transport failure, or
 * -EREMOTEIO if the SVC reported a non-zero UniPro result code.
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
				intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
				intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
388
/*
 * Write @value to DME attribute @attr (with @selector) on the peer on
 * interface @intf_id.
 *
 * Returns 0 on success, a negative errno on transport failure, or
 * -EREMOTEIO if the SVC reported a non-zero UniPro result code.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
				intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
				intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
421
422 int gb_svc_connection_create(struct gb_svc *svc,
423                                 u8 intf1_id, u16 cport1_id,
424                                 u8 intf2_id, u16 cport2_id,
425                                 u8 cport_flags)
426 {
427         struct gb_svc_conn_create_request request;
428
429         request.intf1_id = intf1_id;
430         request.cport1_id = cpu_to_le16(cport1_id);
431         request.intf2_id = intf2_id;
432         request.cport2_id = cpu_to_le16(cport2_id);
433         request.tc = 0;         /* TC0 */
434         request.flags = cport_flags;
435
436         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
437                                  &request, sizeof(request), NULL, 0);
438 }
439 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
440
/* Quiesce the (@intf1_id:@cport1_id, @intf2_id:@cport2_id) connection. */
void gb_svc_connection_quiescing(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
					u8 intf2_id, u16 cport2_id)
{
	/* FIXME: implement; currently a no-op that only logs the request. */

	dev_dbg(&svc->dev, "%s - (%u:%u %u:%u)\n", __func__,
				intf1_id, cport1_id, intf2_id, cport2_id);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_quiescing);
450
451 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
452                                u8 intf2_id, u16 cport2_id)
453 {
454         struct gb_svc_conn_destroy_request request;
455         struct gb_connection *connection = svc->connection;
456         int ret;
457
458         request.intf1_id = intf1_id;
459         request.cport1_id = cpu_to_le16(cport1_id);
460         request.intf2_id = intf2_id;
461         request.cport2_id = cpu_to_le16(cport2_id);
462
463         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
464                                 &request, sizeof(request), NULL, 0);
465         if (ret) {
466                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
467                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
468         }
469 }
470 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
471
472 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
473                            u32 strobe_delay, u32 refclk)
474 {
475         struct gb_connection *connection = svc->connection;
476         struct gb_svc_timesync_enable_request request;
477
478         request.count = count;
479         request.frame_time = cpu_to_le64(frame_time);
480         request.strobe_delay = cpu_to_le32(strobe_delay);
481         request.refclk = cpu_to_le32(refclk);
482         return gb_operation_sync(connection,
483                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
484                                  &request, sizeof(request), NULL, 0);
485 }
486 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
487
488 int gb_svc_timesync_disable(struct gb_svc *svc)
489 {
490         struct gb_connection *connection = svc->connection;
491
492         return gb_operation_sync(connection,
493                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
494                                  NULL, 0, NULL, 0);
495 }
496 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
497
/*
 * Fetch the SVC's authoritative frame-times for the TimeSync strobes.
 *
 * NOTE(review): writes GB_TIMESYNC_MAX_STROBES entries into @frame_time,
 * so callers must pass a buffer at least that large — confirm at call
 * sites (outside this file).
 */
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_authoritative_response response;
	int ret, i;

	ret = gb_operation_sync(connection,
				GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	/* Convert each frame-time to CPU endianness for the caller. */
	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		frame_time[i] = le64_to_cpu(response.frame_time[i]);
	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
515
516 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
517 {
518         struct gb_connection *connection = svc->connection;
519         struct gb_svc_timesync_ping_response response;
520         int ret;
521
522         ret = gb_operation_sync(connection,
523                                 GB_SVC_TYPE_TIMESYNC_PING,
524                                 NULL, 0,
525                                 &response, sizeof(response));
526         if (ret < 0)
527                 return ret;
528
529         *frame_time = le64_to_cpu(response.frame_time);
530         return 0;
531 }
532 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
533
534 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
535 {
536         struct gb_connection *connection = svc->connection;
537         struct gb_svc_timesync_wake_pins_acquire_request request;
538
539         request.strobe_mask = cpu_to_le32(strobe_mask);
540         return gb_operation_sync(connection,
541                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
542                                  &request, sizeof(request),
543                                  NULL, 0);
544 }
545 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
546
547 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
548 {
549         struct gb_connection *connection = svc->connection;
550
551         return gb_operation_sync(connection,
552                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
553                                  NULL, 0, NULL, 0);
554 }
555 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
556
557 /* Creates bi-directional routes between the devices */
558 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
559                                u8 intf2_id, u8 dev2_id)
560 {
561         struct gb_svc_route_create_request request;
562
563         request.intf1_id = intf1_id;
564         request.dev1_id = dev1_id;
565         request.intf2_id = intf2_id;
566         request.dev2_id = dev2_id;
567
568         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
569                                  &request, sizeof(request), NULL, 0);
570 }
571
572 /* Destroys bi-directional routes between the devices */
573 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
574 {
575         struct gb_svc_route_destroy_request request;
576         int ret;
577
578         request.intf1_id = intf1_id;
579         request.intf2_id = intf2_id;
580
581         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
582                                 &request, sizeof(request), NULL, 0);
583         if (ret) {
584                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
585                                 intf1_id, intf2_id, ret);
586         }
587 }
588
/*
 * Set the UniPro power mode of interface @intf_id.
 *
 * Returns a negative errno on transport failure; otherwise returns the
 * SVC's result code converted to CPU endianness (so a positive value
 * indicates a remote error — callers must interpret non-zero codes).
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
618
619 int gb_svc_ping(struct gb_svc *svc)
620 {
621         return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
622                                          NULL, 0, NULL, 0,
623                                          GB_OPERATION_TIMEOUT_DEFAULT * 2);
624 }
625 EXPORT_SYMBOL_GPL(gb_svc_ping);
626
/*
 * Handle the incoming SVC protocol-version request: validate the
 * request size, refuse major versions newer than GB_SVC_VERSION_MAJOR,
 * record the negotiated version on @svc and echo it in the response.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
				request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	/* Allocate and fill the response echoing the accepted version. */
	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
661
662 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
663                                         size_t len, loff_t *offset)
664 {
665         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
666         struct gb_svc *svc = pwrmon_rails->svc;
667         int ret, desc;
668         u32 value;
669         char buff[16];
670
671         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
672                                        GB_SVC_PWRMON_TYPE_VOL, &value);
673         if (ret) {
674                 dev_err(&svc->dev,
675                         "failed to get voltage sample %u: %d\n",
676                         pwrmon_rails->id, ret);
677                 return ret;
678         }
679
680         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
681
682         return simple_read_from_buffer(buf, len, offset, buff, desc);
683 }
684
685 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
686                                         size_t len, loff_t *offset)
687 {
688         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
689         struct gb_svc *svc = pwrmon_rails->svc;
690         int ret, desc;
691         u32 value;
692         char buff[16];
693
694         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
695                                        GB_SVC_PWRMON_TYPE_CURR, &value);
696         if (ret) {
697                 dev_err(&svc->dev,
698                         "failed to get current sample %u: %d\n",
699                         pwrmon_rails->id, ret);
700                 return ret;
701         }
702
703         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
704
705         return simple_read_from_buffer(buf, len, offset, buff, desc);
706 }
707
708 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
709                                       size_t len, loff_t *offset)
710 {
711         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
712         struct gb_svc *svc = pwrmon_rails->svc;
713         int ret, desc;
714         u32 value;
715         char buff[16];
716
717         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
718                                        GB_SVC_PWRMON_TYPE_PWR, &value);
719         if (ret) {
720                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
721                         pwrmon_rails->id, ret);
722                 return ret;
723         }
724
725         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
726
727         return simple_read_from_buffer(buf, len, offset, buff, desc);
728 }
729
/* Read-only fops for the per-rail pwrmon debugfs files. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
741
/*
 * Build the "pwrmon" debugfs tree: one subdirectory per power rail,
 * each containing voltage_now/current_now/power_now files.  Any failure
 * tears the tree back down and the driver continues without it.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-check the SVC-reported count before sizing buffers. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		/*
		 * NOTE(review): assumes the SVC NUL-terminates each name
		 * within its GB_SVC_PWRMON_RAIL_NAME_BUFSIZE slot (buffer
		 * is kzalloc'd, so this holds unless a name fills the
		 * whole slot) — confirm against the SVC firmware.
		 */
		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	/* The name buffer is only needed while creating the files. */
	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
805
806 static void gb_svc_debugfs_init(struct gb_svc *svc)
807 {
808         svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
809                                                  gb_debugfs_get());
810         gb_svc_pwrmon_debugfs_init(svc);
811 }
812
/* Remove the SVC debugfs tree and free the pwrmon rail bookkeeping. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	/* Allocated by gb_svc_pwrmon_debugfs_init(); kfree(NULL) is fine. */
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
819
/*
 * Handle the SVC hello request: record the endo id and the AP's
 * interface id, then register the svc device, its input device and the
 * watchdog, and set up debugfs.  Each failure path unwinds everything
 * registered before it.  Remaining work is deferred to workqueue
 * context via gb_svc_queue_deferred_request().
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	/* Reject malformed (short) requests. */
	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	gb_svc_debugfs_init(svc);

	return gb_svc_queue_deferred_request(op);
}
863
864 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
865                                                         u8 intf_id)
866 {
867         struct gb_host_device *hd = svc->hd;
868         struct gb_module *module;
869         size_t num_interfaces;
870         u8 module_id;
871
872         list_for_each_entry(module, &hd->modules, hd_node) {
873                 module_id = module->module_id;
874                 num_interfaces = module->num_interfaces;
875
876                 if (intf_id >= module_id &&
877                                 intf_id < module_id + num_interfaces) {
878                         return module->interfaces[intf_id - module_id];
879                 }
880         }
881
882         return NULL;
883 }
884
885 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
886 {
887         struct gb_host_device *hd = svc->hd;
888         struct gb_module *module;
889
890         list_for_each_entry(module, &hd->modules, hd_node) {
891                 if (module->module_id == module_id)
892                         return module;
893         }
894
895         return NULL;
896 }
897
/*
 * Power-cycle an interface after a mode switch: disable it with I/O
 * suppressed, then enable it again.  If re-enabling fails the interface
 * is deactivated entirely.  Takes intf->mutex for the duration.
 */
static void gb_svc_intf_reenable(struct gb_svc *svc, struct gb_interface *intf)
{
	int ret;

	mutex_lock(&intf->mutex);

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to enable interface %u: %d\n",
				intf->interface_id, ret);

		gb_interface_deactivate(intf);
	}

	mutex_unlock(&intf->mutex);
}
919
920 static void gb_svc_process_hello_deferred(struct gb_operation *operation)
921 {
922         struct gb_connection *connection = operation->connection;
923         struct gb_svc *svc = gb_connection_get_data(connection);
924         int ret;
925
926         /*
927          * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
928          * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
929          * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
930          * module.
931          *
932          * The code should be removed once SW-2217, Heuristic for UniPro
933          * Power Mode Changes is resolved.
934          */
935         ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
936                                         GB_SVC_UNIPRO_HS_SERIES_A,
937                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
938                                         2, 1,
939                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
940                                         2, 1,
941                                         0, 0);
942
943         if (ret)
944                 dev_warn(&svc->dev,
945                         "power mode change failed on AP to switch link: %d\n",
946                         ret);
947 }
948
949 static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
950 {
951         struct gb_svc_intf_hotplug_request *request;
952         struct gb_connection *connection = operation->connection;
953         struct gb_svc *svc = gb_connection_get_data(connection);
954         struct gb_host_device *hd = connection->hd;
955         struct gb_module *module;
956         u8 intf_id;
957         int ret;
958
959         /* The request message size has already been verified. */
960         request = operation->request->payload;
961         intf_id = request->intf_id;
962
963         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
964
965         /* All modules are considered 1x2 for now */
966         module = gb_svc_module_lookup(svc, intf_id);
967         if (module) {
968                 dev_info(&svc->dev, "mode switch detected on interface %u\n",
969                                 intf_id);
970
971                 return gb_svc_intf_reenable(svc, module->interfaces[0]);
972         }
973
974         module = gb_module_create(hd, intf_id, 1);
975         if (!module) {
976                 dev_err(&svc->dev, "failed to create module\n");
977                 return;
978         }
979
980         ret = gb_module_add(module);
981         if (ret) {
982                 gb_module_put(module);
983                 return;
984         }
985
986         list_add(&module->hd_node, &hd->modules);
987 }
988
989 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
990 {
991         struct gb_svc *svc = gb_connection_get_data(operation->connection);
992         struct gb_svc_intf_hot_unplug_request *request;
993         struct gb_module *module;
994         u8 intf_id;
995
996         /* The request message size has already been verified. */
997         request = operation->request->payload;
998         intf_id = request->intf_id;
999
1000         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
1001
1002         /* All modules are considered 1x2 for now */
1003         module = gb_svc_module_lookup(svc, intf_id);
1004         if (!module) {
1005                 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
1006                                 intf_id);
1007                 return;
1008         }
1009
1010         module->disconnected = true;
1011
1012         gb_module_del(module);
1013         list_del(&module->hd_node);
1014         gb_module_put(module);
1015 }
1016
1017 static void gb_svc_process_module_inserted(struct gb_operation *operation)
1018 {
1019         struct gb_svc_module_inserted_request *request;
1020         struct gb_connection *connection = operation->connection;
1021         struct gb_svc *svc = gb_connection_get_data(connection);
1022         struct gb_host_device *hd = svc->hd;
1023         struct gb_module *module;
1024         size_t num_interfaces;
1025         u8 module_id;
1026         u16 flags;
1027         int ret;
1028
1029         /* The request message size has already been verified. */
1030         request = operation->request->payload;
1031         module_id = request->primary_intf_id;
1032         num_interfaces = request->intf_count;
1033         flags = le16_to_cpu(request->flags);
1034
1035         dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
1036                         __func__, module_id, num_interfaces, flags);
1037
1038         if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
1039                 dev_warn(&svc->dev, "no primary interface detected on module %u\n",
1040                                 module_id);
1041         }
1042
1043         module = gb_svc_module_lookup(svc, module_id);
1044         if (module) {
1045                 dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
1046                                 module_id);
1047                 return;
1048         }
1049
1050         module = gb_module_create(hd, module_id, num_interfaces);
1051         if (!module) {
1052                 dev_err(&svc->dev, "failed to create module\n");
1053                 return;
1054         }
1055
1056         ret = gb_module_add(module);
1057         if (ret) {
1058                 gb_module_put(module);
1059                 return;
1060         }
1061
1062         list_add(&module->hd_node, &hd->modules);
1063 }
1064
1065 static void gb_svc_process_module_removed(struct gb_operation *operation)
1066 {
1067         struct gb_svc_module_removed_request *request;
1068         struct gb_connection *connection = operation->connection;
1069         struct gb_svc *svc = gb_connection_get_data(connection);
1070         struct gb_module *module;
1071         u8 module_id;
1072
1073         /* The request message size has already been verified. */
1074         request = operation->request->payload;
1075         module_id = request->primary_intf_id;
1076
1077         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1078
1079         module = gb_svc_module_lookup(svc, module_id);
1080         if (!module) {
1081                 dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1082                                 module_id);
1083                 return;
1084         }
1085
1086         module->disconnected = true;
1087
1088         gb_module_del(module);
1089         list_del(&module->hd_node);
1090         gb_module_put(module);
1091 }
1092
1093 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1094 {
1095         struct gb_svc_intf_mailbox_event_request *request;
1096         struct gb_connection *connection = operation->connection;
1097         struct gb_svc *svc = gb_connection_get_data(connection);
1098         struct gb_interface *intf;
1099         u8 intf_id;
1100         u16 result_code;
1101         u32 mailbox;
1102
1103         /* The request message size has already been verified. */
1104         request = operation->request->payload;
1105         intf_id = request->intf_id;
1106         result_code = le16_to_cpu(request->result_code);
1107         mailbox = le32_to_cpu(request->mailbox);
1108
1109         dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1110                         __func__, intf_id, result_code, mailbox);
1111
1112         intf = gb_svc_interface_lookup(svc, intf_id);
1113         if (!intf) {
1114                 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1115                 return;
1116         }
1117
1118         if (result_code) {
1119                 dev_warn(&svc->dev,
1120                                 "mailbox event %u with UniPro error: 0x%04x\n",
1121                                 intf_id, result_code);
1122                 goto err_disable_interface;
1123         }
1124
1125         if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
1126                 dev_warn(&svc->dev,
1127                                 "mailbox event %u with unexected value: 0x%08x\n",
1128                                 intf_id, mailbox);
1129                 goto err_disable_interface;
1130         }
1131
1132         dev_info(&svc->dev, "mode switch detected on interface %u\n", intf_id);
1133
1134         gb_svc_intf_reenable(svc, intf);
1135
1136         return;
1137
1138 err_disable_interface:
1139         mutex_lock(&intf->mutex);
1140         gb_interface_disable(intf);
1141         gb_interface_deactivate(intf);
1142         mutex_unlock(&intf->mutex);
1143 }
1144
1145 static void gb_svc_process_deferred_request(struct work_struct *work)
1146 {
1147         struct gb_svc_deferred_request *dr;
1148         struct gb_operation *operation;
1149         struct gb_svc *svc;
1150         u8 type;
1151
1152         dr = container_of(work, struct gb_svc_deferred_request, work);
1153         operation = dr->operation;
1154         svc = gb_connection_get_data(operation->connection);
1155         type = operation->request->header->type;
1156
1157         switch (type) {
1158         case GB_SVC_TYPE_SVC_HELLO:
1159                 gb_svc_process_hello_deferred(operation);
1160                 break;
1161         case GB_SVC_TYPE_INTF_HOTPLUG:
1162                 gb_svc_process_intf_hotplug(operation);
1163                 break;
1164         case GB_SVC_TYPE_INTF_HOT_UNPLUG:
1165                 gb_svc_process_intf_hot_unplug(operation);
1166                 break;
1167         case GB_SVC_TYPE_MODULE_INSERTED:
1168                 gb_svc_process_module_inserted(operation);
1169                 break;
1170         case GB_SVC_TYPE_MODULE_REMOVED:
1171                 gb_svc_process_module_removed(operation);
1172                 break;
1173         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1174                 gb_svc_process_intf_mailbox_event(operation);
1175                 break;
1176         default:
1177                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1178         }
1179
1180         gb_operation_put(operation);
1181         kfree(dr);
1182 }
1183
1184 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1185 {
1186         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1187         struct gb_svc_deferred_request *dr;
1188
1189         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1190         if (!dr)
1191                 return -ENOMEM;
1192
1193         gb_operation_get(operation);
1194
1195         dr->operation = operation;
1196         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1197
1198         queue_work(svc->wq, &dr->work);
1199
1200         return 0;
1201 }
1202
/*
 * Bringing up a module can be time consuming, as that may require lots of
 * initialization on the module side. On top of that, we may also need to
 * download the firmware first and flash it on the module.
 *
 * In order not to make other SVC events wait for all of this to finish,
 * handle most of the module hotplug work outside of the hotplug callback,
 * with the help of a workqueue.
 */
1212 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
1213 {
1214         struct gb_svc *svc = gb_connection_get_data(op->connection);
1215         struct gb_svc_intf_hotplug_request *request;
1216
1217         if (op->request->payload_size < sizeof(*request)) {
1218                 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
1219                                 op->request->payload_size, sizeof(*request));
1220                 return -EINVAL;
1221         }
1222
1223         request = op->request->payload;
1224
1225         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1226
1227         return gb_svc_queue_deferred_request(op);
1228 }
1229
1230 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
1231 {
1232         struct gb_svc *svc = gb_connection_get_data(op->connection);
1233         struct gb_svc_intf_hot_unplug_request *request;
1234
1235         if (op->request->payload_size < sizeof(*request)) {
1236                 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
1237                                 op->request->payload_size, sizeof(*request));
1238                 return -EINVAL;
1239         }
1240
1241         request = op->request->payload;
1242
1243         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1244
1245         return gb_svc_queue_deferred_request(op);
1246 }
1247
/*
 * Handle an interface-reset request from the SVC.  Currently only
 * validates the payload; the actual reset is not yet implemented (see
 * the FIXME below), so intf_id is parsed but unused for now.
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1268
1269 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
1270 {
1271         switch (key_code) {
1272         case GB_KEYCODE_ARA:
1273                 *code = SVC_KEY_ARA_BUTTON;
1274                 break;
1275         default:
1276                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
1277                 return -EINVAL;
1278         }
1279
1280         return 0;
1281 }
1282
1283 static int gb_svc_key_event_recv(struct gb_operation *op)
1284 {
1285         struct gb_svc *svc = gb_connection_get_data(op->connection);
1286         struct gb_message *request = op->request;
1287         struct gb_svc_key_event_request *key;
1288         u16 code;
1289         u8 event;
1290         int ret;
1291
1292         if (request->payload_size < sizeof(*key)) {
1293                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
1294                          request->payload_size, sizeof(*key));
1295                 return -EINVAL;
1296         }
1297
1298         key = request->payload;
1299
1300         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
1301         if (ret < 0)
1302                 return ret;
1303
1304         event = key->key_event;
1305         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
1306                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
1307                 return -EINVAL;
1308         }
1309
1310         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
1311         input_sync(svc->input);
1312
1313         return 0;
1314 }
1315
1316 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1317 {
1318         struct gb_svc *svc = gb_connection_get_data(op->connection);
1319         struct gb_svc_module_inserted_request *request;
1320
1321         if (op->request->payload_size < sizeof(*request)) {
1322                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1323                                 op->request->payload_size, sizeof(*request));
1324                 return -EINVAL;
1325         }
1326
1327         request = op->request->payload;
1328
1329         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1330                         request->primary_intf_id);
1331
1332         return gb_svc_queue_deferred_request(op);
1333 }
1334
1335 static int gb_svc_module_removed_recv(struct gb_operation *op)
1336 {
1337         struct gb_svc *svc = gb_connection_get_data(op->connection);
1338         struct gb_svc_module_removed_request *request;
1339
1340         if (op->request->payload_size < sizeof(*request)) {
1341                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1342                                 op->request->payload_size, sizeof(*request));
1343                 return -EINVAL;
1344         }
1345
1346         request = op->request->payload;
1347
1348         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1349                         request->primary_intf_id);
1350
1351         return gb_svc_queue_deferred_request(op);
1352 }
1353
1354 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1355 {
1356         struct gb_svc *svc = gb_connection_get_data(op->connection);
1357         struct gb_svc_intf_mailbox_event_request *request;
1358
1359         if (op->request->payload_size < sizeof(*request)) {
1360                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1361                                 op->request->payload_size, sizeof(*request));
1362                 return -EINVAL;
1363         }
1364
1365         request = op->request->payload;
1366
1367         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1368
1369         return gb_svc_queue_deferred_request(op);
1370 }
1371
/*
 * Main request dispatcher for the SVC connection.
 *
 * First enforces the required startup ordering (PROTOCOL_VERSION, then
 * SVC_HELLO, then everything else) via the svc->state machine, then
 * dispatches the request to its handler.  Returns 0 on success or a
 * negative errno (out-of-order or unknown requests get -EINVAL).
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; the two startup requests advance the state on success. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1440
1441 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
1442 {
1443         struct input_dev *input_dev;
1444
1445         input_dev = input_allocate_device();
1446         if (!input_dev)
1447                 return ERR_PTR(-ENOMEM);
1448
1449         input_dev->name = dev_name(&svc->dev);
1450         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
1451                                     input_dev->name);
1452         if (!svc->input_phys)
1453                 goto err_free_input;
1454
1455         input_dev->phys = svc->input_phys;
1456         input_dev->dev.parent = &svc->dev;
1457
1458         input_set_drvdata(input_dev, svc);
1459
1460         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
1461
1462         return input_dev;
1463
1464 err_free_input:
1465         input_free_device(svc->input);
1466         return ERR_PTR(-ENOMEM);
1467 }
1468
/*
 * Device release callback, run when the last reference to the svc
 * device is dropped: destroys the connection (if created), the device-id
 * ida and the workqueue, then frees the input phys string and the svc
 * structure itself.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
1480
/* Device type for SVC devices; release frees the embedded gb_svc. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1485
/*
 * Allocate and initialise (but do not register) an SVC structure for a
 * host device: workqueue, device-model setup, input device and the
 * static SVC-cport connection.  Returns the svc or NULL on failure.
 *
 * After device_initialize() succeeds, cleanup goes through put_device()
 * so gb_svc_release() frees everything owned by the structure.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* Single-threaded, unbound queue for deferred SVC requests. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->input = gb_svc_input_create(svc);
	if (IS_ERR(svc->input)) {
		dev_err(&svc->dev, "failed to create input device: %ld\n",
			PTR_ERR(svc->input));
		goto err_put_device;
	}

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
				PTR_ERR(svc->connection));
		goto err_free_input;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_free_input:
	input_free_device(svc->input);
err_put_device:
	/* Releases the workqueue, ida etc. via gb_svc_release(). */
	put_device(&svc->dev);
	return NULL;
}
1538
1539 int gb_svc_add(struct gb_svc *svc)
1540 {
1541         int ret;
1542
1543         /*
1544          * The SVC protocol is currently driven by the SVC, so the SVC device
1545          * is added from the connection request handler when enough
1546          * information has been received.
1547          */
1548         ret = gb_connection_enable(svc->connection);
1549         if (ret)
1550                 return ret;
1551
1552         return 0;
1553 }
1554
1555 static void gb_svc_remove_modules(struct gb_svc *svc)
1556 {
1557         struct gb_host_device *hd = svc->hd;
1558         struct gb_module *module, *tmp;
1559
1560         list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1561                 gb_module_del(module);
1562                 list_del(&module->hd_node);
1563                 gb_module_put(module);
1564         }
1565 }
1566
/*
 * Tear down a running SVC: disable the connection first so no new
 * requests arrive, unregister the devices registered by the request
 * handler (if any), drain the deferred-request workqueue, and finally
 * remove all modules.
 */
void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Wait for any queued deferred requests to finish. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);
}
1586
/* Drop a reference to the svc device; may free it via gb_svc_release(). */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}