80e8cf04ade9899ee9f3841d2e78bf66bd115763
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/input.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
16 #define SVC_KEY_ARA_BUTTON      KEY_A
17
18 #define SVC_INTF_EJECT_TIMEOUT          9000
19 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
20
/*
 * Wrapper used to defer handling of an incoming SVC request to workqueue
 * context; holds a reference to the originating operation.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* queued via gb_svc_queue_deferred_request() */
	struct gb_operation *operation;	/* request being deferred */
};


static int gb_svc_queue_deferred_request(struct gb_operation *operation);
28
29 static ssize_t endo_id_show(struct device *dev,
30                         struct device_attribute *attr, char *buf)
31 {
32         struct gb_svc *svc = to_gb_svc(dev);
33
34         return sprintf(buf, "0x%04x\n", svc->endo_id);
35 }
36 static DEVICE_ATTR_RO(endo_id);
37
38 static ssize_t ap_intf_id_show(struct device *dev,
39                         struct device_attribute *attr, char *buf)
40 {
41         struct gb_svc *svc = to_gb_svc(dev);
42
43         return sprintf(buf, "%u\n", svc->ap_intf_id);
44 }
45 static DEVICE_ATTR_RO(ap_intf_id);
46
47
48 // FIXME
49 // This is a hack, we need to do this "right" and clean the interface up
50 // properly, not just forcibly yank the thing out of the system and hope for the
51 // best.  But for now, people want their modules to come out without having to
52 // throw the thing to the ground or get out a screwdriver.
53 static ssize_t intf_eject_store(struct device *dev,
54                                 struct device_attribute *attr, const char *buf,
55                                 size_t len)
56 {
57         struct gb_svc *svc = to_gb_svc(dev);
58         unsigned short intf_id;
59         int ret;
60
61         ret = kstrtou16(buf, 10, &intf_id);
62         if (ret < 0)
63                 return ret;
64
65         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
66
67         ret = gb_svc_intf_eject(svc, intf_id);
68         if (ret < 0)
69                 return ret;
70
71         return len;
72 }
73 static DEVICE_ATTR_WO(intf_eject);
74
75 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
76                              char *buf)
77 {
78         struct gb_svc *svc = to_gb_svc(dev);
79
80         return sprintf(buf, "%s\n",
81                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
82 }
83
84 static ssize_t watchdog_store(struct device *dev,
85                               struct device_attribute *attr, const char *buf,
86                               size_t len)
87 {
88         struct gb_svc *svc = to_gb_svc(dev);
89         int retval;
90         bool user_request;
91
92         retval = strtobool(buf, &user_request);
93         if (retval)
94                 return retval;
95
96         if (user_request)
97                 retval = gb_svc_watchdog_enable(svc);
98         else
99                 retval = gb_svc_watchdog_disable(svc);
100         if (retval)
101                 return retval;
102         return len;
103 }
104 static DEVICE_ATTR_RW(watchdog);
105
106 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
107 {
108         struct gb_svc_pwrmon_rail_count_get_response response;
109         int ret;
110
111         ret = gb_operation_sync(svc->connection,
112                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
113                                 &response, sizeof(response));
114         if (ret) {
115                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
116                 return ret;
117         }
118
119         *value = response.rail_count;
120
121         return 0;
122 }
123
124 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
125                 struct gb_svc_pwrmon_rail_names_get_response *response,
126                 size_t bufsize)
127 {
128         int ret;
129
130         ret = gb_operation_sync(svc->connection,
131                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
132                                 response, bufsize);
133         if (ret) {
134                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
135                 return ret;
136         }
137
138         return 0;
139 }
140
141 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
142                                     u8 measurement_type, u32 *value)
143 {
144         struct gb_svc_pwrmon_sample_get_request request;
145         struct gb_svc_pwrmon_sample_get_response response;
146         int ret;
147
148         request.rail_id = rail_id;
149         request.measurement_type = measurement_type;
150
151         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
152                                 &request, sizeof(request),
153                                 &response, sizeof(response));
154         if (ret) {
155                 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
156                 return ret;
157         }
158
159         if (response.result) {
160                 dev_err(&svc->dev,
161                         "UniPro error while getting rail power sample (%d %d): %d\n",
162                         rail_id, measurement_type, response.result);
163                 switch (response.result) {
164                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
165                         return -EINVAL;
166                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
167                         return -ENOMSG;
168                 default:
169                         return -EIO;
170                 }
171         }
172
173         *value = le32_to_cpu(response.measurement);
174
175         return 0;
176 }
177
178 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
179                                   u8 measurement_type, u32 *value)
180 {
181         struct gb_svc_pwrmon_intf_sample_get_request request;
182         struct gb_svc_pwrmon_intf_sample_get_response response;
183         int ret;
184
185         request.intf_id = intf_id;
186         request.measurement_type = measurement_type;
187
188         ret = gb_operation_sync(svc->connection,
189                                 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
190                                 &request, sizeof(request),
191                                 &response, sizeof(response));
192         if (ret) {
193                 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
194                 return ret;
195         }
196
197         if (response.result) {
198                 dev_err(&svc->dev,
199                         "UniPro error while getting intf power sample (%d %d): %d\n",
200                         intf_id, measurement_type, response.result);
201                 switch (response.result) {
202                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
203                         return -EINVAL;
204                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
205                         return -ENOSYS;
206                 default:
207                         return -EIO;
208                 }
209         }
210
211         *value = le32_to_cpu(response.measurement);
212
213         return 0;
214 }
215
/* sysfs attributes exposed on the SVC device (see the _show/_store above). */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
224
225 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
226 {
227         struct gb_svc_intf_device_id_request request;
228
229         request.intf_id = intf_id;
230         request.device_id = device_id;
231
232         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
233                                  &request, sizeof(request), NULL, 0);
234 }
235
236 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
237 {
238         struct gb_svc_intf_eject_request request;
239         int ret;
240
241         request.intf_id = intf_id;
242
243         /*
244          * The pulse width for module release in svc is long so we need to
245          * increase the timeout so the operation will not return to soon.
246          */
247         ret = gb_operation_sync_timeout(svc->connection,
248                                         GB_SVC_TYPE_INTF_EJECT, &request,
249                                         sizeof(request), NULL, 0,
250                                         SVC_INTF_EJECT_TIMEOUT);
251         if (ret) {
252                 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
253                 return ret;
254         }
255
256         return 0;
257 }
258
259 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
260 {
261         struct gb_svc_intf_vsys_request request;
262         struct gb_svc_intf_vsys_response response;
263         int type, ret;
264
265         request.intf_id = intf_id;
266
267         if (enable)
268                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
269         else
270                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
271
272         ret = gb_operation_sync(svc->connection, type,
273                         &request, sizeof(request),
274                         &response, sizeof(response));
275         if (ret < 0)
276                 return ret;
277         if (response.result_code != GB_SVC_INTF_VSYS_OK)
278                 return -EREMOTEIO;
279         return 0;
280 }
281
282 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
283 {
284         struct gb_svc_intf_refclk_request request;
285         struct gb_svc_intf_refclk_response response;
286         int type, ret;
287
288         request.intf_id = intf_id;
289
290         if (enable)
291                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
292         else
293                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
294
295         ret = gb_operation_sync(svc->connection, type,
296                         &request, sizeof(request),
297                         &response, sizeof(response));
298         if (ret < 0)
299                 return ret;
300         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
301                 return -EREMOTEIO;
302         return 0;
303 }
304
305 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
306 {
307         struct gb_svc_intf_unipro_request request;
308         struct gb_svc_intf_unipro_response response;
309         int type, ret;
310
311         request.intf_id = intf_id;
312
313         if (enable)
314                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
315         else
316                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
317
318         ret = gb_operation_sync(svc->connection, type,
319                         &request, sizeof(request),
320                         &response, sizeof(response));
321         if (ret < 0)
322                 return ret;
323         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
324                 return -EREMOTEIO;
325         return 0;
326 }
327
328 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
329 {
330         struct gb_svc_intf_activate_request request;
331         struct gb_svc_intf_activate_response response;
332         int ret;
333
334         request.intf_id = intf_id;
335
336         ret = gb_operation_sync_timeout(svc->connection,
337                         GB_SVC_TYPE_INTF_ACTIVATE,
338                         &request, sizeof(request),
339                         &response, sizeof(response),
340                         SVC_INTF_ACTIVATE_TIMEOUT);
341         if (ret < 0)
342                 return ret;
343         if (response.status != GB_SVC_OP_SUCCESS) {
344                 dev_err(&svc->dev, "failed to activate interface %u: %u\n",
345                                 intf_id, response.status);
346                 return -EREMOTEIO;
347         }
348
349         *intf_type = response.intf_type;
350
351         return 0;
352 }
353
354 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
355                         u32 *value)
356 {
357         struct gb_svc_dme_peer_get_request request;
358         struct gb_svc_dme_peer_get_response response;
359         u16 result;
360         int ret;
361
362         request.intf_id = intf_id;
363         request.attr = cpu_to_le16(attr);
364         request.selector = cpu_to_le16(selector);
365
366         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
367                                 &request, sizeof(request),
368                                 &response, sizeof(response));
369         if (ret) {
370                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
371                                 intf_id, attr, selector, ret);
372                 return ret;
373         }
374
375         result = le16_to_cpu(response.result_code);
376         if (result) {
377                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
378                                 intf_id, attr, selector, result);
379                 return -EIO;
380         }
381
382         if (value)
383                 *value = le32_to_cpu(response.attr_value);
384
385         return 0;
386 }
387 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
388
389 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
390                         u32 value)
391 {
392         struct gb_svc_dme_peer_set_request request;
393         struct gb_svc_dme_peer_set_response response;
394         u16 result;
395         int ret;
396
397         request.intf_id = intf_id;
398         request.attr = cpu_to_le16(attr);
399         request.selector = cpu_to_le16(selector);
400         request.value = cpu_to_le32(value);
401
402         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
403                                 &request, sizeof(request),
404                                 &response, sizeof(response));
405         if (ret) {
406                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
407                                 intf_id, attr, selector, value, ret);
408                 return ret;
409         }
410
411         result = le16_to_cpu(response.result_code);
412         if (result) {
413                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
414                                 intf_id, attr, selector, value, result);
415                 return -EIO;
416         }
417
418         return 0;
419 }
420 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
421
422 int gb_svc_connection_create(struct gb_svc *svc,
423                                 u8 intf1_id, u16 cport1_id,
424                                 u8 intf2_id, u16 cport2_id,
425                                 u8 cport_flags)
426 {
427         struct gb_svc_conn_create_request request;
428
429         request.intf1_id = intf1_id;
430         request.cport1_id = cpu_to_le16(cport1_id);
431         request.intf2_id = intf2_id;
432         request.cport2_id = cpu_to_le16(cport2_id);
433         request.tc = 0;         /* TC0 */
434         request.flags = cport_flags;
435
436         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
437                                  &request, sizeof(request), NULL, 0);
438 }
439 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
440
441 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
442                                u8 intf2_id, u16 cport2_id)
443 {
444         struct gb_svc_conn_destroy_request request;
445         struct gb_connection *connection = svc->connection;
446         int ret;
447
448         request.intf1_id = intf1_id;
449         request.cport1_id = cpu_to_le16(cport1_id);
450         request.intf2_id = intf2_id;
451         request.cport2_id = cpu_to_le16(cport2_id);
452
453         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
454                                 &request, sizeof(request), NULL, 0);
455         if (ret) {
456                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
457                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
458         }
459 }
460 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
461
462 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
463                            u32 strobe_delay, u32 refclk)
464 {
465         struct gb_connection *connection = svc->connection;
466         struct gb_svc_timesync_enable_request request;
467
468         request.count = count;
469         request.frame_time = cpu_to_le64(frame_time);
470         request.strobe_delay = cpu_to_le32(strobe_delay);
471         request.refclk = cpu_to_le32(refclk);
472         return gb_operation_sync(connection,
473                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
474                                  &request, sizeof(request), NULL, 0);
475 }
476 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
477
478 int gb_svc_timesync_disable(struct gb_svc *svc)
479 {
480         struct gb_connection *connection = svc->connection;
481
482         return gb_operation_sync(connection,
483                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
484                                  NULL, 0, NULL, 0);
485 }
486 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
487
488 int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
489 {
490         struct gb_connection *connection = svc->connection;
491         struct gb_svc_timesync_authoritative_response response;
492         int ret, i;
493
494         ret = gb_operation_sync(connection,
495                                 GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
496                                 &response, sizeof(response));
497         if (ret < 0)
498                 return ret;
499
500         for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
501                 frame_time[i] = le64_to_cpu(response.frame_time[i]);
502         return 0;
503 }
504 EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
505
506 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
507 {
508         struct gb_connection *connection = svc->connection;
509         struct gb_svc_timesync_ping_response response;
510         int ret;
511
512         ret = gb_operation_sync(connection,
513                                 GB_SVC_TYPE_TIMESYNC_PING,
514                                 NULL, 0,
515                                 &response, sizeof(response));
516         if (ret < 0)
517                 return ret;
518
519         *frame_time = le64_to_cpu(response.frame_time);
520         return 0;
521 }
522 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
523
524 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
525 {
526         struct gb_connection *connection = svc->connection;
527         struct gb_svc_timesync_wake_pins_acquire_request request;
528
529         request.strobe_mask = cpu_to_le32(strobe_mask);
530         return gb_operation_sync(connection,
531                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
532                                  &request, sizeof(request),
533                                  NULL, 0);
534 }
535 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
536
537 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
538 {
539         struct gb_connection *connection = svc->connection;
540
541         return gb_operation_sync(connection,
542                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
543                                  NULL, 0, NULL, 0);
544 }
545 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
546
547 /* Creates bi-directional routes between the devices */
548 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
549                                u8 intf2_id, u8 dev2_id)
550 {
551         struct gb_svc_route_create_request request;
552
553         request.intf1_id = intf1_id;
554         request.dev1_id = dev1_id;
555         request.intf2_id = intf2_id;
556         request.dev2_id = dev2_id;
557
558         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
559                                  &request, sizeof(request), NULL, 0);
560 }
561
562 /* Destroys bi-directional routes between the devices */
563 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
564 {
565         struct gb_svc_route_destroy_request request;
566         int ret;
567
568         request.intf1_id = intf1_id;
569         request.intf2_id = intf2_id;
570
571         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
572                                 &request, sizeof(request), NULL, 0);
573         if (ret) {
574                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
575                                 intf1_id, intf2_id, ret);
576         }
577 }
578
/*
 * Configure the UniPro link power mode for interface @intf_id.
 *
 * Returns a negative errno on transport failure, otherwise the SVC's
 * result code (0 on success) — note callers must treat any non-zero
 * return as failure, not just negative values.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	/* Pass the SVC's in-band result code through to the caller. */
	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
608
609 int gb_svc_ping(struct gb_svc *svc)
610 {
611         return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
612                                          NULL, 0, NULL, 0,
613                                          GB_OPERATION_TIMEOUT_DEFAULT * 2);
614 }
615 EXPORT_SYMBOL_GPL(gb_svc_ping);
616
/*
 * Handle an incoming SVC version request: validate the request size,
 * negotiate the protocol version (reject majors newer than ours), record
 * the negotiated version and reply with it.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	/* Reject short payloads before dereferencing the request. */
	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* A higher major version than ours is incompatible by definition. */
	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
				request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Echo back the version we agreed to speak. */
	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
651
652 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
653                                         size_t len, loff_t *offset)
654 {
655         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
656         struct gb_svc *svc = pwrmon_rails->svc;
657         int ret, desc;
658         u32 value;
659         char buff[16];
660
661         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
662                                        GB_SVC_PWRMON_TYPE_VOL, &value);
663         if (ret) {
664                 dev_err(&svc->dev,
665                         "failed to get voltage sample %u: %d\n",
666                         pwrmon_rails->id, ret);
667                 return ret;
668         }
669
670         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
671
672         return simple_read_from_buffer(buf, len, offset, buff, desc);
673 }
674
675 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
676                                         size_t len, loff_t *offset)
677 {
678         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
679         struct gb_svc *svc = pwrmon_rails->svc;
680         int ret, desc;
681         u32 value;
682         char buff[16];
683
684         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
685                                        GB_SVC_PWRMON_TYPE_CURR, &value);
686         if (ret) {
687                 dev_err(&svc->dev,
688                         "failed to get current sample %u: %d\n",
689                         pwrmon_rails->id, ret);
690                 return ret;
691         }
692
693         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
694
695         return simple_read_from_buffer(buf, len, offset, buff, desc);
696 }
697
698 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
699                                       size_t len, loff_t *offset)
700 {
701         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
702         struct gb_svc *svc = pwrmon_rails->svc;
703         int ret, desc;
704         u32 value;
705         char buff[16];
706
707         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
708                                        GB_SVC_PWRMON_TYPE_PWR, &value);
709         if (ret) {
710                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
711                         pwrmon_rails->id, ret);
712                 return ret;
713         }
714
715         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
716
717         return simple_read_from_buffer(buf, len, offset, buff, desc);
718 }
719
/* Read-only debugfs fops for the per-rail voltage/current/power files. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
731
732 static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
733 {
734         int i;
735         size_t bufsize;
736         struct dentry *dent;
737         struct gb_svc_pwrmon_rail_names_get_response *rail_names;
738         u8 rail_count;
739
740         dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
741         if (IS_ERR_OR_NULL(dent))
742                 return;
743
744         if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
745                 goto err_pwrmon_debugfs;
746
747         if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
748                 goto err_pwrmon_debugfs;
749
750         bufsize = GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
751
752         rail_names = kzalloc(bufsize, GFP_KERNEL);
753         if (!rail_names)
754                 goto err_pwrmon_debugfs;
755
756         svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
757                                     GFP_KERNEL);
758         if (!svc->pwrmon_rails)
759                 goto err_pwrmon_debugfs_free;
760
761         if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
762                 goto err_pwrmon_debugfs_free;
763
764         for (i = 0; i < rail_count; i++) {
765                 struct dentry *dir;
766                 struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
767                 char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
768
769                 snprintf(fname, sizeof(fname), "%s",
770                          (char *)&rail_names->name[i]);
771
772                 rail->id = i;
773                 rail->svc = svc;
774
775                 dir = debugfs_create_dir(fname, dent);
776                 debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
777                                     &pwrmon_debugfs_voltage_fops);
778                 debugfs_create_file("current_now", S_IRUGO, dir, rail,
779                                     &pwrmon_debugfs_current_fops);
780                 debugfs_create_file("power_now", S_IRUGO, dir, rail,
781                                     &pwrmon_debugfs_power_fops);
782         };
783
784         kfree(rail_names);
785         return;
786
787 err_pwrmon_debugfs_free:
788         kfree(rail_names);
789         kfree(svc->pwrmon_rails);
790         svc->pwrmon_rails = NULL;
791
792 err_pwrmon_debugfs:
793         debugfs_remove(dent);
794 }
795
/* Create the SVC's debugfs directory and populate the pwrmon entries. */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}
802
/* Remove the SVC debugfs tree and free the pwrmon rail bookkeeping. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;	/* guard against double free/use */
}
809
/*
 * Handle the SVC hello request: record the endo and AP interface ids,
 * register the svc device, input device and watchdog (unwinding in
 * reverse order on failure), then defer the rest of the bring-up to
 * workqueue context.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	/* Validate payload size before touching the request. */
	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		/* Unwind in reverse registration order. */
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	gb_svc_debugfs_init(svc);

	/* Continue bring-up (module enumeration etc.) off the rx path. */
	return gb_svc_queue_deferred_request(op);
}
853
854 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
855                                                         u8 intf_id)
856 {
857         struct gb_host_device *hd = svc->hd;
858         struct gb_module *module;
859         size_t num_interfaces;
860         u8 module_id;
861
862         list_for_each_entry(module, &hd->modules, hd_node) {
863                 module_id = module->module_id;
864                 num_interfaces = module->num_interfaces;
865
866                 if (intf_id >= module_id &&
867                                 intf_id < module_id + num_interfaces) {
868                         return module->interfaces[intf_id - module_id];
869                 }
870         }
871
872         return NULL;
873 }
874
875 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
876 {
877         struct gb_host_device *hd = svc->hd;
878         struct gb_module *module;
879
880         list_for_each_entry(module, &hd->modules, hd_node) {
881                 if (module->module_id == module_id)
882                         return module;
883         }
884
885         return NULL;
886 }
887
/*
 * Disable and re-enable an interface after a mode switch.
 *
 * The interface is temporarily flagged as disconnected so that no I/O is
 * attempted over the link while it is being torn down; the flag is
 * cleared again before re-enabling.  If re-enabling fails, the interface
 * is deactivated entirely.
 */
static void gb_svc_intf_reenable(struct gb_svc *svc, struct gb_interface *intf)
{
        int ret;

        mutex_lock(&intf->mutex);

        /* Mark as disconnected to prevent I/O during disable. */
        intf->disconnected = true;
        gb_interface_disable(intf);
        intf->disconnected = false;

        ret = gb_interface_enable(intf);
        if (ret) {
                dev_err(&svc->dev, "failed to enable interface %u: %d\n",
                                intf->interface_id, ret);

                gb_interface_deactivate(intf);
        }

        mutex_unlock(&intf->mutex);
}
909
/*
 * Deferred part of SVC_HELLO processing: reconfigure the AP-to-switch
 * UniPro link power mode.  A failure here is only logged, not fatal.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        struct gb_svc *svc = gb_connection_get_data(connection);
        int ret;

        /*
         * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
         * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
         * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
         * module.
         *
         * The code should be removed once SW-2217, Heuristic for UniPro
         * Power Mode Changes is resolved.
         */
        /*
         * NOTE(review): the literal arguments presumably select gear 2 and
         * one lane for both tx and rx directions -- confirm against the
         * gb_svc_intf_set_power_mode() prototype.
         */
        ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
                                        GB_SVC_UNIPRO_HS_SERIES_A,
                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
                                        2, 1,
                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
                                        2, 1,
                                        0, 0);

        if (ret)
                dev_warn(&svc->dev,
                        "power mode change failed on AP to switch link: %d\n",
                        ret);
}
938
939 static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
940 {
941         struct gb_svc_intf_hotplug_request *request;
942         struct gb_connection *connection = operation->connection;
943         struct gb_svc *svc = gb_connection_get_data(connection);
944         struct gb_host_device *hd = connection->hd;
945         struct gb_module *module;
946         u8 intf_id;
947         int ret;
948
949         /* The request message size has already been verified. */
950         request = operation->request->payload;
951         intf_id = request->intf_id;
952
953         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
954
955         /* All modules are considered 1x2 for now */
956         module = gb_svc_module_lookup(svc, intf_id);
957         if (module) {
958                 dev_info(&svc->dev, "mode switch detected on interface %u\n",
959                                 intf_id);
960
961                 return gb_svc_intf_reenable(svc, module->interfaces[0]);
962         }
963
964         module = gb_module_create(hd, intf_id, 1);
965         if (!module) {
966                 dev_err(&svc->dev, "failed to create module\n");
967                 return;
968         }
969
970         ret = gb_module_add(module);
971         if (ret) {
972                 gb_module_put(module);
973                 return;
974         }
975
976         list_add(&module->hd_node, &hd->modules);
977 }
978
979 static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
980 {
981         struct gb_svc *svc = gb_connection_get_data(operation->connection);
982         struct gb_svc_intf_hot_unplug_request *request;
983         struct gb_module *module;
984         u8 intf_id;
985
986         /* The request message size has already been verified. */
987         request = operation->request->payload;
988         intf_id = request->intf_id;
989
990         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
991
992         /* All modules are considered 1x2 for now */
993         module = gb_svc_module_lookup(svc, intf_id);
994         if (!module) {
995                 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
996                                 intf_id);
997                 return;
998         }
999
1000         module->disconnected = true;
1001
1002         gb_module_del(module);
1003         list_del(&module->hd_node);
1004         gb_module_put(module);
1005 }
1006
1007 static void gb_svc_process_module_inserted(struct gb_operation *operation)
1008 {
1009         struct gb_svc_module_inserted_request *request;
1010         struct gb_connection *connection = operation->connection;
1011         struct gb_svc *svc = gb_connection_get_data(connection);
1012         struct gb_host_device *hd = svc->hd;
1013         struct gb_module *module;
1014         size_t num_interfaces;
1015         u8 module_id;
1016         u16 flags;
1017         int ret;
1018
1019         /* The request message size has already been verified. */
1020         request = operation->request->payload;
1021         module_id = request->primary_intf_id;
1022         num_interfaces = request->intf_count;
1023         flags = le16_to_cpu(request->flags);
1024
1025         dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
1026                         __func__, module_id, num_interfaces, flags);
1027
1028         if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
1029                 dev_warn(&svc->dev, "no primary interface detected on module %u\n",
1030                                 module_id);
1031         }
1032
1033         module = gb_svc_module_lookup(svc, module_id);
1034         if (module) {
1035                 dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
1036                                 module_id);
1037                 return;
1038         }
1039
1040         module = gb_module_create(hd, module_id, num_interfaces);
1041         if (!module) {
1042                 dev_err(&svc->dev, "failed to create module\n");
1043                 return;
1044         }
1045
1046         ret = gb_module_add(module);
1047         if (ret) {
1048                 gb_module_put(module);
1049                 return;
1050         }
1051
1052         list_add(&module->hd_node, &hd->modules);
1053 }
1054
1055 static void gb_svc_process_module_removed(struct gb_operation *operation)
1056 {
1057         struct gb_svc_module_removed_request *request;
1058         struct gb_connection *connection = operation->connection;
1059         struct gb_svc *svc = gb_connection_get_data(connection);
1060         struct gb_module *module;
1061         u8 module_id;
1062
1063         /* The request message size has already been verified. */
1064         request = operation->request->payload;
1065         module_id = request->primary_intf_id;
1066
1067         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1068
1069         module = gb_svc_module_lookup(svc, module_id);
1070         if (!module) {
1071                 dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1072                                 module_id);
1073                 return;
1074         }
1075
1076         module->disconnected = true;
1077
1078         gb_module_del(module);
1079         list_del(&module->hd_node);
1080         gb_module_put(module);
1081 }
1082
1083 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1084 {
1085         struct gb_svc_intf_mailbox_event_request *request;
1086         struct gb_connection *connection = operation->connection;
1087         struct gb_svc *svc = gb_connection_get_data(connection);
1088         struct gb_interface *intf;
1089         u8 intf_id;
1090         u16 result_code;
1091         u32 mailbox;
1092
1093         /* The request message size has already been verified. */
1094         request = operation->request->payload;
1095         intf_id = request->intf_id;
1096         result_code = le16_to_cpu(request->result_code);
1097         mailbox = le32_to_cpu(request->mailbox);
1098
1099         dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1100                         __func__, intf_id, result_code, mailbox);
1101
1102         intf = gb_svc_interface_lookup(svc, intf_id);
1103         if (!intf) {
1104                 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1105                 return;
1106         }
1107
1108         if (result_code) {
1109                 dev_warn(&svc->dev,
1110                                 "mailbox event %u with UniPro error: 0x%04x\n",
1111                                 intf_id, result_code);
1112                 goto err_disable_interface;
1113         }
1114
1115         if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
1116                 dev_warn(&svc->dev,
1117                                 "mailbox event %u with unexected value: 0x%08x\n",
1118                                 intf_id, mailbox);
1119                 goto err_disable_interface;
1120         }
1121
1122         dev_info(&svc->dev, "mode switch detected on interface %u\n", intf_id);
1123
1124         gb_svc_intf_reenable(svc, intf);
1125
1126         return;
1127
1128 err_disable_interface:
1129         mutex_lock(&intf->mutex);
1130         gb_interface_disable(intf);
1131         gb_interface_deactivate(intf);
1132         mutex_unlock(&intf->mutex);
1133 }
1134
1135 static void gb_svc_process_deferred_request(struct work_struct *work)
1136 {
1137         struct gb_svc_deferred_request *dr;
1138         struct gb_operation *operation;
1139         struct gb_svc *svc;
1140         u8 type;
1141
1142         dr = container_of(work, struct gb_svc_deferred_request, work);
1143         operation = dr->operation;
1144         svc = gb_connection_get_data(operation->connection);
1145         type = operation->request->header->type;
1146
1147         switch (type) {
1148         case GB_SVC_TYPE_SVC_HELLO:
1149                 gb_svc_process_hello_deferred(operation);
1150                 break;
1151         case GB_SVC_TYPE_INTF_HOTPLUG:
1152                 gb_svc_process_intf_hotplug(operation);
1153                 break;
1154         case GB_SVC_TYPE_INTF_HOT_UNPLUG:
1155                 gb_svc_process_intf_hot_unplug(operation);
1156                 break;
1157         case GB_SVC_TYPE_MODULE_INSERTED:
1158                 gb_svc_process_module_inserted(operation);
1159                 break;
1160         case GB_SVC_TYPE_MODULE_REMOVED:
1161                 gb_svc_process_module_removed(operation);
1162                 break;
1163         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1164                 gb_svc_process_intf_mailbox_event(operation);
1165                 break;
1166         default:
1167                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1168         }
1169
1170         gb_operation_put(operation);
1171         kfree(dr);
1172 }
1173
1174 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1175 {
1176         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1177         struct gb_svc_deferred_request *dr;
1178
1179         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1180         if (!dr)
1181                 return -ENOMEM;
1182
1183         gb_operation_get(operation);
1184
1185         dr->operation = operation;
1186         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1187
1188         queue_work(svc->wq, &dr->work);
1189
1190         return 0;
1191 }
1192
1193 /*
1194  * Bringing up a module can be time consuming, as that may require lots of
1195  * initialization on the module side. Over that, we may also need to download
1196  * the firmware first and flash that on the module.
1197  *
1198  * In order not to make other svc events wait for all this to finish,
1199  * handle most of module hotplug stuff outside of the hotplug callback, with
1200  * help of a workqueue.
1201  */
1202 static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
1203 {
1204         struct gb_svc *svc = gb_connection_get_data(op->connection);
1205         struct gb_svc_intf_hotplug_request *request;
1206
1207         if (op->request->payload_size < sizeof(*request)) {
1208                 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
1209                                 op->request->payload_size, sizeof(*request));
1210                 return -EINVAL;
1211         }
1212
1213         request = op->request->payload;
1214
1215         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1216
1217         return gb_svc_queue_deferred_request(op);
1218 }
1219
1220 static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
1221 {
1222         struct gb_svc *svc = gb_connection_get_data(op->connection);
1223         struct gb_svc_intf_hot_unplug_request *request;
1224
1225         if (op->request->payload_size < sizeof(*request)) {
1226                 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
1227                                 op->request->payload_size, sizeof(*request));
1228                 return -EINVAL;
1229         }
1230
1231         request = op->request->payload;
1232
1233         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1234
1235         return gb_svc_queue_deferred_request(op);
1236 }
1237
/*
 * Handler for an interface-reset request from the SVC.
 *
 * Currently only validates the request size; the actual interface reset
 * is not implemented yet (see FIXME below).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
        struct gb_svc *svc = gb_connection_get_data(op->connection);
        struct gb_message *request = op->request;
        struct gb_svc_intf_reset_request *reset;
        u8 intf_id;

        if (request->payload_size < sizeof(*reset)) {
                dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
                                request->payload_size, sizeof(*reset));
                return -EINVAL;
        }
        reset = request->payload;

        /* intf_id is unused until the reset below is implemented. */
        intf_id = reset->intf_id;

        /* FIXME Reset the interface here */

        return 0;
}
1258
1259 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
1260 {
1261         switch (key_code) {
1262         case GB_KEYCODE_ARA:
1263                 *code = SVC_KEY_ARA_BUTTON;
1264                 break;
1265         default:
1266                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
1267                 return -EINVAL;
1268         }
1269
1270         return 0;
1271 }
1272
1273 static int gb_svc_key_event_recv(struct gb_operation *op)
1274 {
1275         struct gb_svc *svc = gb_connection_get_data(op->connection);
1276         struct gb_message *request = op->request;
1277         struct gb_svc_key_event_request *key;
1278         u16 code;
1279         u8 event;
1280         int ret;
1281
1282         if (request->payload_size < sizeof(*key)) {
1283                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
1284                          request->payload_size, sizeof(*key));
1285                 return -EINVAL;
1286         }
1287
1288         key = request->payload;
1289
1290         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
1291         if (ret < 0)
1292                 return ret;
1293
1294         event = key->key_event;
1295         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
1296                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
1297                 return -EINVAL;
1298         }
1299
1300         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
1301         input_sync(svc->input);
1302
1303         return 0;
1304 }
1305
1306 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1307 {
1308         struct gb_svc *svc = gb_connection_get_data(op->connection);
1309         struct gb_svc_module_inserted_request *request;
1310
1311         if (op->request->payload_size < sizeof(*request)) {
1312                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1313                                 op->request->payload_size, sizeof(*request));
1314                 return -EINVAL;
1315         }
1316
1317         request = op->request->payload;
1318
1319         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1320                         request->primary_intf_id);
1321
1322         return gb_svc_queue_deferred_request(op);
1323 }
1324
1325 static int gb_svc_module_removed_recv(struct gb_operation *op)
1326 {
1327         struct gb_svc *svc = gb_connection_get_data(op->connection);
1328         struct gb_svc_module_removed_request *request;
1329
1330         if (op->request->payload_size < sizeof(*request)) {
1331                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1332                                 op->request->payload_size, sizeof(*request));
1333                 return -EINVAL;
1334         }
1335
1336         request = op->request->payload;
1337
1338         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1339                         request->primary_intf_id);
1340
1341         return gb_svc_queue_deferred_request(op);
1342 }
1343
1344 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1345 {
1346         struct gb_svc *svc = gb_connection_get_data(op->connection);
1347         struct gb_svc_intf_mailbox_event_request *request;
1348
1349         if (op->request->payload_size < sizeof(*request)) {
1350                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1351                                 op->request->payload_size, sizeof(*request));
1352                 return -EINVAL;
1353         }
1354
1355         request = op->request->payload;
1356
1357         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1358
1359         return gb_svc_queue_deferred_request(op);
1360 }
1361
/*
 * Top-level SVC request handler: enforce the required request ordering
 * via the svc state machine, then dispatch to the per-type handler.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = gb_connection_get_data(connection);
        u8 type = op->type;
        int ret = 0;

        /*
         * SVC requests need to follow a specific order (at least initially) and
         * the code below takes care of enforcing that. The expected order is:
         * - PROTOCOL_VERSION
         * - SVC_HELLO
         * - Any other request, except the two above.
         *
         * Incoming requests are guaranteed to be serialized and so we don't
         * need to protect 'state' against any races.
         */
        switch (type) {
        case GB_SVC_TYPE_PROTOCOL_VERSION:
                if (svc->state != GB_SVC_STATE_RESET)
                        ret = -EINVAL;
                break;
        case GB_SVC_TYPE_SVC_HELLO:
                if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
                        ret = -EINVAL;
                break;
        default:
                if (svc->state != GB_SVC_STATE_SVC_HELLO)
                        ret = -EINVAL;
                break;
        }

        if (ret) {
                dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
                                type, svc->state);
                return ret;
        }

        switch (type) {
        case GB_SVC_TYPE_PROTOCOL_VERSION:
                ret = gb_svc_version_request(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
                return ret;
        case GB_SVC_TYPE_SVC_HELLO:
                ret = gb_svc_hello(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_SVC_HELLO;
                return ret;
        case GB_SVC_TYPE_INTF_HOTPLUG:
                return gb_svc_intf_hotplug_recv(op);
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                return gb_svc_intf_hot_unplug_recv(op);
        case GB_SVC_TYPE_INTF_RESET:
                return gb_svc_intf_reset_recv(op);
        case GB_SVC_TYPE_KEY_EVENT:
                return gb_svc_key_event_recv(op);
        case GB_SVC_TYPE_MODULE_INSERTED:
                return gb_svc_module_inserted_recv(op);
        case GB_SVC_TYPE_MODULE_REMOVED:
                return gb_svc_module_removed_recv(op);
        case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
                return gb_svc_intf_mailbox_event_recv(op);
        default:
                dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
                return -EINVAL;
        }
}
1430
1431 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
1432 {
1433         struct input_dev *input_dev;
1434
1435         input_dev = input_allocate_device();
1436         if (!input_dev)
1437                 return ERR_PTR(-ENOMEM);
1438
1439         input_dev->name = dev_name(&svc->dev);
1440         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
1441                                     input_dev->name);
1442         if (!svc->input_phys)
1443                 goto err_free_input;
1444
1445         input_dev->phys = svc->input_phys;
1446         input_dev->dev.parent = &svc->dev;
1447
1448         input_set_drvdata(input_dev, svc);
1449
1450         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
1451
1452         return input_dev;
1453
1454 err_free_input:
1455         input_free_device(svc->input);
1456         return ERR_PTR(-ENOMEM);
1457 }
1458
/*
 * Device-model release callback for the svc device: frees all resources
 * owned by the svc once the last reference is dropped.
 */
static void gb_svc_release(struct device *dev)
{
        struct gb_svc *svc = to_gb_svc(dev);

        /* The connection is NULL if gb_svc_create() failed early. */
        if (svc->connection)
                gb_connection_destroy(svc->connection);
        ida_destroy(&svc->device_id_map);
        destroy_workqueue(svc->wq);
        kfree(svc->input_phys);
        kfree(svc);
}
1470
/* Device type for SVC devices registered on the greybus bus. */
struct device_type greybus_svc_type = {
        .name           = "greybus_svc",
        .release        = gb_svc_release,
};
1475
/*
 * Allocate and initialize (but do not register) an svc structure for the
 * given host device.
 *
 * The svc device itself is only added later, from the SVC_HELLO request
 * handler.  Returns NULL on failure; once device_initialize() has run,
 * cleanup of partially initialized state happens through put_device()
 * and the gb_svc_release() callback.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
        struct gb_svc *svc;

        svc = kzalloc(sizeof(*svc), GFP_KERNEL);
        if (!svc)
                return NULL;

        /* Ordered, unbound workqueue for deferred SVC request processing. */
        svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
        if (!svc->wq) {
                kfree(svc);
                return NULL;
        }

        svc->dev.parent = &hd->dev;
        svc->dev.bus = &greybus_bus_type;
        svc->dev.type = &greybus_svc_type;
        svc->dev.groups = svc_groups;
        svc->dev.dma_mask = svc->dev.parent->dma_mask;
        device_initialize(&svc->dev);

        dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

        ida_init(&svc->device_id_map);
        svc->state = GB_SVC_STATE_RESET;
        svc->hd = hd;

        svc->input = gb_svc_input_create(svc);
        if (IS_ERR(svc->input)) {
                dev_err(&svc->dev, "failed to create input device: %ld\n",
                        PTR_ERR(svc->input));
                goto err_put_device;
        }

        svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
                                                gb_svc_request_handler);
        if (IS_ERR(svc->connection)) {
                dev_err(&svc->dev, "failed to create connection: %ld\n",
                                PTR_ERR(svc->connection));
                goto err_free_input;
        }

        gb_connection_set_data(svc->connection, svc);

        return svc;

err_free_input:
        input_free_device(svc->input);
err_put_device:
        put_device(&svc->dev);
        return NULL;
}
1528
1529 int gb_svc_add(struct gb_svc *svc)
1530 {
1531         int ret;
1532
1533         /*
1534          * The SVC protocol is currently driven by the SVC, so the SVC device
1535          * is added from the connection request handler when enough
1536          * information has been received.
1537          */
1538         ret = gb_connection_enable(svc->connection);
1539         if (ret)
1540                 return ret;
1541
1542         return 0;
1543 }
1544
1545 static void gb_svc_remove_modules(struct gb_svc *svc)
1546 {
1547         struct gb_host_device *hd = svc->hd;
1548         struct gb_module *module, *tmp;
1549
1550         list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1551                 gb_module_del(module);
1552                 list_del(&module->hd_node);
1553                 gb_module_put(module);
1554         }
1555 }
1556
/*
 * Tear down the svc: disable the connection, unregister the devices that
 * may have been registered from the request handler, drain any deferred
 * work, and remove all modules.
 */
void gb_svc_del(struct gb_svc *svc)
{
        gb_connection_disable(svc->connection);

        /*
         * The SVC device and input device may have been registered
         * from the request handler.
         */
        if (device_is_registered(&svc->dev)) {
                gb_svc_debugfs_exit(svc);
                gb_svc_watchdog_destroy(svc);
                input_unregister_device(svc->input);
                device_del(&svc->dev);
        }

        /* Wait for any queued deferred requests to finish. */
        flush_workqueue(svc->wq);

        gb_svc_remove_modules(svc);
}
1576
/*
 * Drop a reference on the svc device; the last put triggers
 * gb_svc_release() and frees the svc.
 */
void gb_svc_put(struct gb_svc *svc)
{
        put_device(&svc->dev);
}