greybus: svc: remove deprecated hotplug operations
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/input.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
16 #define SVC_KEY_ARA_BUTTON      KEY_A
17
18 #define SVC_INTF_EJECT_TIMEOUT          9000
19 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
20
/*
 * Wrapper used to defer handling of an incoming SVC request (queued
 * via gb_svc_queue_deferred_request()) to process context.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* work item run from the workqueue */
	struct gb_operation *operation;	/* the request being deferred */
};
25
26
27 static int gb_svc_queue_deferred_request(struct gb_operation *operation);
28
29 static ssize_t endo_id_show(struct device *dev,
30                         struct device_attribute *attr, char *buf)
31 {
32         struct gb_svc *svc = to_gb_svc(dev);
33
34         return sprintf(buf, "0x%04x\n", svc->endo_id);
35 }
36 static DEVICE_ATTR_RO(endo_id);
37
38 static ssize_t ap_intf_id_show(struct device *dev,
39                         struct device_attribute *attr, char *buf)
40 {
41         struct gb_svc *svc = to_gb_svc(dev);
42
43         return sprintf(buf, "%u\n", svc->ap_intf_id);
44 }
45 static DEVICE_ATTR_RO(ap_intf_id);
46
47
48 // FIXME
49 // This is a hack, we need to do this "right" and clean the interface up
50 // properly, not just forcibly yank the thing out of the system and hope for the
51 // best.  But for now, people want their modules to come out without having to
52 // throw the thing to the ground or get out a screwdriver.
53 static ssize_t intf_eject_store(struct device *dev,
54                                 struct device_attribute *attr, const char *buf,
55                                 size_t len)
56 {
57         struct gb_svc *svc = to_gb_svc(dev);
58         unsigned short intf_id;
59         int ret;
60
61         ret = kstrtou16(buf, 10, &intf_id);
62         if (ret < 0)
63                 return ret;
64
65         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
66
67         ret = gb_svc_intf_eject(svc, intf_id);
68         if (ret < 0)
69                 return ret;
70
71         return len;
72 }
73 static DEVICE_ATTR_WO(intf_eject);
74
75 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
76                              char *buf)
77 {
78         struct gb_svc *svc = to_gb_svc(dev);
79
80         return sprintf(buf, "%s\n",
81                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
82 }
83
84 static ssize_t watchdog_store(struct device *dev,
85                               struct device_attribute *attr, const char *buf,
86                               size_t len)
87 {
88         struct gb_svc *svc = to_gb_svc(dev);
89         int retval;
90         bool user_request;
91
92         retval = strtobool(buf, &user_request);
93         if (retval)
94                 return retval;
95
96         if (user_request)
97                 retval = gb_svc_watchdog_enable(svc);
98         else
99                 retval = gb_svc_watchdog_disable(svc);
100         if (retval)
101                 return retval;
102         return len;
103 }
104 static DEVICE_ATTR_RW(watchdog);
105
106 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
107 {
108         struct gb_svc_pwrmon_rail_count_get_response response;
109         int ret;
110
111         ret = gb_operation_sync(svc->connection,
112                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
113                                 &response, sizeof(response));
114         if (ret) {
115                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
116                 return ret;
117         }
118
119         *value = response.rail_count;
120
121         return 0;
122 }
123
124 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
125                 struct gb_svc_pwrmon_rail_names_get_response *response,
126                 size_t bufsize)
127 {
128         int ret;
129
130         ret = gb_operation_sync(svc->connection,
131                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
132                                 response, bufsize);
133         if (ret) {
134                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
135                 return ret;
136         }
137
138         return 0;
139 }
140
/*
 * Read one power-monitor sample for rail @rail_id; @measurement_type
 * selects voltage, current or power (GB_SVC_PWRMON_TYPE_*).
 *
 * On success stores the sample in @value and returns 0. Returns a
 * negative errno on transport failure, or maps an SVC-reported error
 * to -EINVAL (invalid rail), -ENOMSG (unsupported measurement) or
 * -EREMOTEIO (anything else).
 */
static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	/* Non-zero result means the SVC rejected or failed the request. */
	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
177
/*
 * Read one power-monitor sample for the whole interface @intf_id;
 * @measurement_type selects voltage, current or power.
 *
 * Same return-value convention as gb_svc_pwrmon_sample_get(): 0 on
 * success with the sample in @value, negative errno otherwise.
 */
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	/* Non-zero result means the SVC rejected or failed the request. */
	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
215
/* sysfs attributes exposed by the SVC device (see *_show/*_store above). */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
224
225 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
226 {
227         struct gb_svc_intf_device_id_request request;
228
229         request.intf_id = intf_id;
230         request.device_id = device_id;
231
232         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
233                                  &request, sizeof(request), NULL, 0);
234 }
235
/*
 * Ask the SVC to physically eject interface @intf_id.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
258
259 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
260 {
261         struct gb_svc_intf_vsys_request request;
262         struct gb_svc_intf_vsys_response response;
263         int type, ret;
264
265         request.intf_id = intf_id;
266
267         if (enable)
268                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
269         else
270                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
271
272         ret = gb_operation_sync(svc->connection, type,
273                         &request, sizeof(request),
274                         &response, sizeof(response));
275         if (ret < 0)
276                 return ret;
277         if (response.result_code != GB_SVC_INTF_VSYS_OK)
278                 return -EREMOTEIO;
279         return 0;
280 }
281
282 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
283 {
284         struct gb_svc_intf_refclk_request request;
285         struct gb_svc_intf_refclk_response response;
286         int type, ret;
287
288         request.intf_id = intf_id;
289
290         if (enable)
291                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
292         else
293                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
294
295         ret = gb_operation_sync(svc->connection, type,
296                         &request, sizeof(request),
297                         &response, sizeof(response));
298         if (ret < 0)
299                 return ret;
300         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
301                 return -EREMOTEIO;
302         return 0;
303 }
304
305 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
306 {
307         struct gb_svc_intf_unipro_request request;
308         struct gb_svc_intf_unipro_response response;
309         int type, ret;
310
311         request.intf_id = intf_id;
312
313         if (enable)
314                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
315         else
316                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
317
318         ret = gb_operation_sync(svc->connection, type,
319                         &request, sizeof(request),
320                         &response, sizeof(response));
321         if (ret < 0)
322                 return ret;
323         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
324                 return -EREMOTEIO;
325         return 0;
326 }
327
/*
 * Activate interface @intf_id and report the detected interface type.
 *
 * Uses an extended timeout (SVC_INTF_ACTIVATE_TIMEOUT) as activation
 * may take a while. On success stores the SVC-reported type in
 * @intf_type and returns 0; returns a negative errno on transport
 * failure or -EREMOTEIO when the SVC reports a non-success status.
 */
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
			GB_SVC_TYPE_INTF_ACTIVATE,
			&request, sizeof(request),
			&response, sizeof(response),
			SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
				intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}
353
/*
 * Read the peer DME attribute @attr (with @selector) of interface
 * @intf_id through the SVC.
 *
 * On success returns 0 and, if @value is non-NULL, stores the
 * attribute there. Returns a negative errno on transport failure or
 * -EREMOTEIO when the remote reports a non-zero UniPro result code.
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
				intf_id, attr, selector, ret);
		return ret;
	}

	/* Non-zero result code is a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
				intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
388
/*
 * Write @value to the peer DME attribute @attr (with @selector) of
 * interface @intf_id through the SVC.
 *
 * Returns 0 on success, a negative errno on transport failure, or
 * -EREMOTEIO when the remote reports a non-zero UniPro result code.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
				intf_id, attr, selector, value, ret);
		return ret;
	}

	/* Non-zero result code is a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
				intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
421
422 int gb_svc_connection_create(struct gb_svc *svc,
423                                 u8 intf1_id, u16 cport1_id,
424                                 u8 intf2_id, u16 cport2_id,
425                                 u8 cport_flags)
426 {
427         struct gb_svc_conn_create_request request;
428
429         request.intf1_id = intf1_id;
430         request.cport1_id = cpu_to_le16(cport1_id);
431         request.intf2_id = intf2_id;
432         request.cport2_id = cpu_to_le16(cport2_id);
433         request.tc = 0;         /* TC0 */
434         request.flags = cport_flags;
435
436         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
437                                  &request, sizeof(request), NULL, 0);
438 }
439 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
440
/*
 * Notify the SVC that the connection between the given cports is being
 * quiesced. Not implemented yet; currently only logs the call.
 */
void gb_svc_connection_quiescing(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
					u8 intf2_id, u16 cport2_id)
{
	/* FIXME: implement */

	dev_dbg(&svc->dev, "%s - (%u:%u %u:%u)\n", __func__,
				intf1_id, cport1_id, intf2_id, cport2_id);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_quiescing);
450
451 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
452                                u8 intf2_id, u16 cport2_id)
453 {
454         struct gb_svc_conn_destroy_request request;
455         struct gb_connection *connection = svc->connection;
456         int ret;
457
458         request.intf1_id = intf1_id;
459         request.cport1_id = cpu_to_le16(cport1_id);
460         request.intf2_id = intf2_id;
461         request.cport2_id = cpu_to_le16(cport2_id);
462
463         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
464                                 &request, sizeof(request), NULL, 0);
465         if (ret) {
466                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
467                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
468         }
469 }
470 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
471
472 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
473                            u32 strobe_delay, u32 refclk)
474 {
475         struct gb_connection *connection = svc->connection;
476         struct gb_svc_timesync_enable_request request;
477
478         request.count = count;
479         request.frame_time = cpu_to_le64(frame_time);
480         request.strobe_delay = cpu_to_le32(strobe_delay);
481         request.refclk = cpu_to_le32(refclk);
482         return gb_operation_sync(connection,
483                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
484                                  &request, sizeof(request), NULL, 0);
485 }
486 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
487
488 int gb_svc_timesync_disable(struct gb_svc *svc)
489 {
490         struct gb_connection *connection = svc->connection;
491
492         return gb_operation_sync(connection,
493                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
494                                  NULL, 0, NULL, 0);
495 }
496 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
497
498 int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
499 {
500         struct gb_connection *connection = svc->connection;
501         struct gb_svc_timesync_authoritative_response response;
502         int ret, i;
503
504         ret = gb_operation_sync(connection,
505                                 GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
506                                 &response, sizeof(response));
507         if (ret < 0)
508                 return ret;
509
510         for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
511                 frame_time[i] = le64_to_cpu(response.frame_time[i]);
512         return 0;
513 }
514 EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
515
516 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
517 {
518         struct gb_connection *connection = svc->connection;
519         struct gb_svc_timesync_ping_response response;
520         int ret;
521
522         ret = gb_operation_sync(connection,
523                                 GB_SVC_TYPE_TIMESYNC_PING,
524                                 NULL, 0,
525                                 &response, sizeof(response));
526         if (ret < 0)
527                 return ret;
528
529         *frame_time = le64_to_cpu(response.frame_time);
530         return 0;
531 }
532 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
533
534 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
535 {
536         struct gb_connection *connection = svc->connection;
537         struct gb_svc_timesync_wake_pins_acquire_request request;
538
539         request.strobe_mask = cpu_to_le32(strobe_mask);
540         return gb_operation_sync(connection,
541                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
542                                  &request, sizeof(request),
543                                  NULL, 0);
544 }
545 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
546
547 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
548 {
549         struct gb_connection *connection = svc->connection;
550
551         return gb_operation_sync(connection,
552                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
553                                  NULL, 0, NULL, 0);
554 }
555 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
556
557 /* Creates bi-directional routes between the devices */
558 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
559                                u8 intf2_id, u8 dev2_id)
560 {
561         struct gb_svc_route_create_request request;
562
563         request.intf1_id = intf1_id;
564         request.dev1_id = dev1_id;
565         request.intf2_id = intf2_id;
566         request.dev2_id = dev2_id;
567
568         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
569                                  &request, sizeof(request), NULL, 0);
570 }
571
/*
 * Destroys bi-directional routes between the devices on interfaces
 * @intf1_id and @intf2_id. Failures are only logged, as callers run in
 * teardown paths and cannot act on the error.
 */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
				intf1_id, intf2_id, ret);
	}
}
588
/*
 * Request a UniPro power-mode change on interface @intf_id.
 *
 * NOTE: returns a negative errno on transport failure; otherwise it
 * returns the (non-negative) SVC result code, so 0 means success and
 * callers must treat positive values as remote errors.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
618
619 int gb_svc_ping(struct gb_svc *svc)
620 {
621         return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
622                                          NULL, 0, NULL, 0,
623                                          GB_OPERATION_TIMEOUT_DEFAULT * 2);
624 }
625 EXPORT_SYMBOL_GPL(gb_svc_ping);
626
/*
 * Handle an incoming SVC protocol version request: validate the
 * payload, refuse majors newer than ours, record the negotiated
 * version and echo it back in the response.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* We can talk any older major, but not a newer one. */
	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
				request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
661
662 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
663                                         size_t len, loff_t *offset)
664 {
665         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
666         struct gb_svc *svc = pwrmon_rails->svc;
667         int ret, desc;
668         u32 value;
669         char buff[16];
670
671         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
672                                        GB_SVC_PWRMON_TYPE_VOL, &value);
673         if (ret) {
674                 dev_err(&svc->dev,
675                         "failed to get voltage sample %u: %d\n",
676                         pwrmon_rails->id, ret);
677                 return ret;
678         }
679
680         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
681
682         return simple_read_from_buffer(buf, len, offset, buff, desc);
683 }
684
685 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
686                                         size_t len, loff_t *offset)
687 {
688         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
689         struct gb_svc *svc = pwrmon_rails->svc;
690         int ret, desc;
691         u32 value;
692         char buff[16];
693
694         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
695                                        GB_SVC_PWRMON_TYPE_CURR, &value);
696         if (ret) {
697                 dev_err(&svc->dev,
698                         "failed to get current sample %u: %d\n",
699                         pwrmon_rails->id, ret);
700                 return ret;
701         }
702
703         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
704
705         return simple_read_from_buffer(buf, len, offset, buff, desc);
706 }
707
708 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
709                                       size_t len, loff_t *offset)
710 {
711         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
712         struct gb_svc *svc = pwrmon_rails->svc;
713         int ret, desc;
714         u32 value;
715         char buff[16];
716
717         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
718                                        GB_SVC_PWRMON_TYPE_PWR, &value);
719         if (ret) {
720                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
721                         pwrmon_rails->id, ret);
722                 return ret;
723         }
724
725         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
726
727         return simple_read_from_buffer(buf, len, offset, buff, desc);
728 }
729
730 static const struct file_operations pwrmon_debugfs_voltage_fops = {
731         .read           = pwr_debugfs_voltage_read,
732 };
733
734 static const struct file_operations pwrmon_debugfs_current_fops = {
735         .read           = pwr_debugfs_current_read,
736 };
737
738 static const struct file_operations pwrmon_debugfs_power_fops = {
739         .read           = pwr_debugfs_power_read,
740 };
741
/*
 * Populate <debugfs>/.../pwrmon with one directory per power rail,
 * each containing voltage_now/current_now/power_now files.
 *
 * Failures are silent (debugfs is best-effort): on error the partially
 * created directory is removed and svc->pwrmon_rails stays NULL.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-check the SVC-reported count before sizing buffers. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	/* Lives for the lifetime of the SVC; freed in gb_svc_debugfs_exit(). */
	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
805
/* Create the per-SVC debugfs directory and populate the pwrmon files. */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}
812
/*
 * Remove the SVC debugfs tree and release the pwrmon rail table
 * allocated by gb_svc_pwrmon_debugfs_init().
 */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
819
/*
 * Handle the SVC hello request: record the endo and AP interface ids,
 * register the SVC device, input device and watchdog, set up debugfs,
 * then queue the request for deferred processing.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		/* Unwind in reverse order of registration. */
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	gb_svc_debugfs_init(svc);

	/* Remaining work is deferred to the workqueue. */
	return gb_svc_queue_deferred_request(op);
}
863
864 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
865                                                         u8 intf_id)
866 {
867         struct gb_host_device *hd = svc->hd;
868         struct gb_module *module;
869         size_t num_interfaces;
870         u8 module_id;
871
872         list_for_each_entry(module, &hd->modules, hd_node) {
873                 module_id = module->module_id;
874                 num_interfaces = module->num_interfaces;
875
876                 if (intf_id >= module_id &&
877                                 intf_id < module_id + num_interfaces) {
878                         return module->interfaces[intf_id - module_id];
879                 }
880         }
881
882         return NULL;
883 }
884
885 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
886 {
887         struct gb_host_device *hd = svc->hd;
888         struct gb_module *module;
889
890         list_for_each_entry(module, &hd->modules, hd_node) {
891                 if (module->module_id == module_id)
892                         return module;
893         }
894
895         return NULL;
896 }
897
898 static void gb_svc_process_hello_deferred(struct gb_operation *operation)
899 {
900         struct gb_connection *connection = operation->connection;
901         struct gb_svc *svc = gb_connection_get_data(connection);
902         int ret;
903
904         /*
905          * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
906          * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
907          * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
908          * module.
909          *
910          * The code should be removed once SW-2217, Heuristic for UniPro
911          * Power Mode Changes is resolved.
912          */
913         ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
914                                         GB_SVC_UNIPRO_HS_SERIES_A,
915                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
916                                         2, 1,
917                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
918                                         2, 1,
919                                         0, 0);
920
921         if (ret)
922                 dev_warn(&svc->dev,
923                         "power mode change failed on AP to switch link: %d\n",
924                         ret);
925 }
926
927 static void gb_svc_process_module_inserted(struct gb_operation *operation)
928 {
929         struct gb_svc_module_inserted_request *request;
930         struct gb_connection *connection = operation->connection;
931         struct gb_svc *svc = gb_connection_get_data(connection);
932         struct gb_host_device *hd = svc->hd;
933         struct gb_module *module;
934         size_t num_interfaces;
935         u8 module_id;
936         u16 flags;
937         int ret;
938
939         /* The request message size has already been verified. */
940         request = operation->request->payload;
941         module_id = request->primary_intf_id;
942         num_interfaces = request->intf_count;
943         flags = le16_to_cpu(request->flags);
944
945         dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
946                         __func__, module_id, num_interfaces, flags);
947
948         if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
949                 dev_warn(&svc->dev, "no primary interface detected on module %u\n",
950                                 module_id);
951         }
952
953         module = gb_svc_module_lookup(svc, module_id);
954         if (module) {
955                 dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
956                                 module_id);
957                 return;
958         }
959
960         module = gb_module_create(hd, module_id, num_interfaces);
961         if (!module) {
962                 dev_err(&svc->dev, "failed to create module\n");
963                 return;
964         }
965
966         ret = gb_module_add(module);
967         if (ret) {
968                 gb_module_put(module);
969                 return;
970         }
971
972         list_add(&module->hd_node, &hd->modules);
973 }
974
975 static void gb_svc_process_module_removed(struct gb_operation *operation)
976 {
977         struct gb_svc_module_removed_request *request;
978         struct gb_connection *connection = operation->connection;
979         struct gb_svc *svc = gb_connection_get_data(connection);
980         struct gb_module *module;
981         u8 module_id;
982
983         /* The request message size has already been verified. */
984         request = operation->request->payload;
985         module_id = request->primary_intf_id;
986
987         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
988
989         module = gb_svc_module_lookup(svc, module_id);
990         if (!module) {
991                 dev_warn(&svc->dev, "unexpected module-removed event %u\n",
992                                 module_id);
993                 return;
994         }
995
996         module->disconnected = true;
997
998         gb_module_del(module);
999         list_del(&module->hd_node);
1000         gb_module_put(module);
1001 }
1002
1003 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1004 {
1005         struct gb_svc_intf_mailbox_event_request *request;
1006         struct gb_connection *connection = operation->connection;
1007         struct gb_svc *svc = gb_connection_get_data(connection);
1008         struct gb_interface *intf;
1009         u8 intf_id;
1010         u16 result_code;
1011         u32 mailbox;
1012
1013         /* The request message size has already been verified. */
1014         request = operation->request->payload;
1015         intf_id = request->intf_id;
1016         result_code = le16_to_cpu(request->result_code);
1017         mailbox = le32_to_cpu(request->mailbox);
1018
1019         dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1020                         __func__, intf_id, result_code, mailbox);
1021
1022         intf = gb_svc_interface_lookup(svc, intf_id);
1023         if (!intf) {
1024                 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1025                 return;
1026         }
1027
1028         gb_interface_mailbox_event(intf, result_code, mailbox);
1029 }
1030
1031 static void gb_svc_process_deferred_request(struct work_struct *work)
1032 {
1033         struct gb_svc_deferred_request *dr;
1034         struct gb_operation *operation;
1035         struct gb_svc *svc;
1036         u8 type;
1037
1038         dr = container_of(work, struct gb_svc_deferred_request, work);
1039         operation = dr->operation;
1040         svc = gb_connection_get_data(operation->connection);
1041         type = operation->request->header->type;
1042
1043         switch (type) {
1044         case GB_SVC_TYPE_SVC_HELLO:
1045                 gb_svc_process_hello_deferred(operation);
1046                 break;
1047         case GB_SVC_TYPE_MODULE_INSERTED:
1048                 gb_svc_process_module_inserted(operation);
1049                 break;
1050         case GB_SVC_TYPE_MODULE_REMOVED:
1051                 gb_svc_process_module_removed(operation);
1052                 break;
1053         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1054                 gb_svc_process_intf_mailbox_event(operation);
1055                 break;
1056         default:
1057                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1058         }
1059
1060         gb_operation_put(operation);
1061         kfree(dr);
1062 }
1063
1064 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1065 {
1066         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1067         struct gb_svc_deferred_request *dr;
1068
1069         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1070         if (!dr)
1071                 return -ENOMEM;
1072
1073         gb_operation_get(operation);
1074
1075         dr->operation = operation;
1076         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1077
1078         queue_work(svc->wq, &dr->work);
1079
1080         return 0;
1081 }
1082
/*
 * Handle an interface reset request from the SVC.
 *
 * Currently only validates the request payload; the actual interface
 * reset is not implemented yet (see FIXME below), so intf_id is
 * extracted but unused.
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1103
1104 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
1105 {
1106         switch (key_code) {
1107         case GB_KEYCODE_ARA:
1108                 *code = SVC_KEY_ARA_BUTTON;
1109                 break;
1110         default:
1111                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
1112                 return -EINVAL;
1113         }
1114
1115         return 0;
1116 }
1117
1118 static int gb_svc_key_event_recv(struct gb_operation *op)
1119 {
1120         struct gb_svc *svc = gb_connection_get_data(op->connection);
1121         struct gb_message *request = op->request;
1122         struct gb_svc_key_event_request *key;
1123         u16 code;
1124         u8 event;
1125         int ret;
1126
1127         if (request->payload_size < sizeof(*key)) {
1128                 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
1129                          request->payload_size, sizeof(*key));
1130                 return -EINVAL;
1131         }
1132
1133         key = request->payload;
1134
1135         ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
1136         if (ret < 0)
1137                 return ret;
1138
1139         event = key->key_event;
1140         if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
1141                 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
1142                 return -EINVAL;
1143         }
1144
1145         input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
1146         input_sync(svc->input);
1147
1148         return 0;
1149 }
1150
1151 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1152 {
1153         struct gb_svc *svc = gb_connection_get_data(op->connection);
1154         struct gb_svc_module_inserted_request *request;
1155
1156         if (op->request->payload_size < sizeof(*request)) {
1157                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1158                                 op->request->payload_size, sizeof(*request));
1159                 return -EINVAL;
1160         }
1161
1162         request = op->request->payload;
1163
1164         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1165                         request->primary_intf_id);
1166
1167         return gb_svc_queue_deferred_request(op);
1168 }
1169
1170 static int gb_svc_module_removed_recv(struct gb_operation *op)
1171 {
1172         struct gb_svc *svc = gb_connection_get_data(op->connection);
1173         struct gb_svc_module_removed_request *request;
1174
1175         if (op->request->payload_size < sizeof(*request)) {
1176                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1177                                 op->request->payload_size, sizeof(*request));
1178                 return -EINVAL;
1179         }
1180
1181         request = op->request->payload;
1182
1183         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1184                         request->primary_intf_id);
1185
1186         return gb_svc_queue_deferred_request(op);
1187 }
1188
1189 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1190 {
1191         struct gb_svc *svc = gb_connection_get_data(op->connection);
1192         struct gb_svc_intf_mailbox_event_request *request;
1193
1194         if (op->request->payload_size < sizeof(*request)) {
1195                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1196                                 op->request->payload_size, sizeof(*request));
1197                 return -EINVAL;
1198         }
1199
1200         request = op->request->payload;
1201
1202         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1203
1204         return gb_svc_queue_deferred_request(op);
1205 }
1206
/*
 * Entry point for all incoming SVC requests on the SVC cport.
 *
 * First enforces the required initial ordering against svc->state,
 * then dispatches to the per-type handler. Returns 0 on success or a
 * negative errno (also causing the operation to fail back to the SVC).
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; state only advances when version/hello succeed. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1271
1272 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
1273 {
1274         struct input_dev *input_dev;
1275
1276         input_dev = input_allocate_device();
1277         if (!input_dev)
1278                 return ERR_PTR(-ENOMEM);
1279
1280         input_dev->name = dev_name(&svc->dev);
1281         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
1282                                     input_dev->name);
1283         if (!svc->input_phys)
1284                 goto err_free_input;
1285
1286         input_dev->phys = svc->input_phys;
1287         input_dev->dev.parent = &svc->dev;
1288
1289         input_set_drvdata(input_dev, svc);
1290
1291         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
1292
1293         return input_dev;
1294
1295 err_free_input:
1296         input_free_device(svc->input);
1297         return ERR_PTR(-ENOMEM);
1298 }
1299
/*
 * Device release callback: frees all svc state once the last reference
 * on the svc device is dropped (via gb_svc_put()/put_device()).
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may not exist if gb_svc_create() failed early. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
1311
/* Device type for the SVC device; release frees the embedding gb_svc. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1316
1317 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
1318 {
1319         struct gb_svc *svc;
1320
1321         svc = kzalloc(sizeof(*svc), GFP_KERNEL);
1322         if (!svc)
1323                 return NULL;
1324
1325         svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
1326         if (!svc->wq) {
1327                 kfree(svc);
1328                 return NULL;
1329         }
1330
1331         svc->dev.parent = &hd->dev;
1332         svc->dev.bus = &greybus_bus_type;
1333         svc->dev.type = &greybus_svc_type;
1334         svc->dev.groups = svc_groups;
1335         svc->dev.dma_mask = svc->dev.parent->dma_mask;
1336         device_initialize(&svc->dev);
1337
1338         dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
1339
1340         ida_init(&svc->device_id_map);
1341         svc->state = GB_SVC_STATE_RESET;
1342         svc->hd = hd;
1343
1344         svc->input = gb_svc_input_create(svc);
1345         if (IS_ERR(svc->input)) {
1346                 dev_err(&svc->dev, "failed to create input device: %ld\n",
1347                         PTR_ERR(svc->input));
1348                 goto err_put_device;
1349         }
1350
1351         svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
1352                                                 gb_svc_request_handler);
1353         if (IS_ERR(svc->connection)) {
1354                 dev_err(&svc->dev, "failed to create connection: %ld\n",
1355                                 PTR_ERR(svc->connection));
1356                 goto err_free_input;
1357         }
1358
1359         gb_connection_set_data(svc->connection, svc);
1360
1361         return svc;
1362
1363 err_free_input:
1364         input_free_device(svc->input);
1365 err_put_device:
1366         put_device(&svc->dev);
1367         return NULL;
1368 }
1369
1370 int gb_svc_add(struct gb_svc *svc)
1371 {
1372         int ret;
1373
1374         /*
1375          * The SVC protocol is currently driven by the SVC, so the SVC device
1376          * is added from the connection request handler when enough
1377          * information has been received.
1378          */
1379         ret = gb_connection_enable(svc->connection);
1380         if (ret)
1381                 return ret;
1382
1383         return 0;
1384 }
1385
/* Unregister and drop every module still on the host device's list. */
static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	/* _safe variant: each entry is unlinked while iterating. */
	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
1397
/*
 * Tear down the SVC: quiesce the connection, unregister anything the
 * request handler registered, drain deferred work, and remove all
 * modules. The svc itself is freed later by gb_svc_put().
 */
void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Let any queued deferred requests finish before removing modules. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);
}
1417
/*
 * Drop a reference on the svc device; the last put frees everything
 * via gb_svc_release().
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}