greybus: svc: pwrmon: validate svc protocol op status when getting rail names
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/input.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
16 #define SVC_KEY_ARA_BUTTON      KEY_A
17
18 #define SVC_INTF_EJECT_TIMEOUT          9000
19 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
20
/*
 * Wrapper used to defer handling of an incoming SVC request to process
 * context: the work item is queued and later processed with access to
 * the original operation.
 */
struct gb_svc_deferred_request {
        struct work_struct work;
        struct gb_operation *operation;
};


/* Queues @operation for deferred processing (defined later in this file). */
static int gb_svc_queue_deferred_request(struct gb_operation *operation);
28
29 static ssize_t endo_id_show(struct device *dev,
30                         struct device_attribute *attr, char *buf)
31 {
32         struct gb_svc *svc = to_gb_svc(dev);
33
34         return sprintf(buf, "0x%04x\n", svc->endo_id);
35 }
36 static DEVICE_ATTR_RO(endo_id);
37
38 static ssize_t ap_intf_id_show(struct device *dev,
39                         struct device_attribute *attr, char *buf)
40 {
41         struct gb_svc *svc = to_gb_svc(dev);
42
43         return sprintf(buf, "%u\n", svc->ap_intf_id);
44 }
45 static DEVICE_ATTR_RO(ap_intf_id);
46
47
48 // FIXME
49 // This is a hack, we need to do this "right" and clean the interface up
50 // properly, not just forcibly yank the thing out of the system and hope for the
51 // best.  But for now, people want their modules to come out without having to
52 // throw the thing to the ground or get out a screwdriver.
53 static ssize_t intf_eject_store(struct device *dev,
54                                 struct device_attribute *attr, const char *buf,
55                                 size_t len)
56 {
57         struct gb_svc *svc = to_gb_svc(dev);
58         unsigned short intf_id;
59         int ret;
60
61         ret = kstrtou16(buf, 10, &intf_id);
62         if (ret < 0)
63                 return ret;
64
65         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
66
67         ret = gb_svc_intf_eject(svc, intf_id);
68         if (ret < 0)
69                 return ret;
70
71         return len;
72 }
73 static DEVICE_ATTR_WO(intf_eject);
74
75 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
76                              char *buf)
77 {
78         struct gb_svc *svc = to_gb_svc(dev);
79
80         return sprintf(buf, "%s\n",
81                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
82 }
83
84 static ssize_t watchdog_store(struct device *dev,
85                               struct device_attribute *attr, const char *buf,
86                               size_t len)
87 {
88         struct gb_svc *svc = to_gb_svc(dev);
89         int retval;
90         bool user_request;
91
92         retval = strtobool(buf, &user_request);
93         if (retval)
94                 return retval;
95
96         if (user_request)
97                 retval = gb_svc_watchdog_enable(svc);
98         else
99                 retval = gb_svc_watchdog_disable(svc);
100         if (retval)
101                 return retval;
102         return len;
103 }
104 static DEVICE_ATTR_RW(watchdog);
105
106 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
107 {
108         struct gb_svc_pwrmon_rail_count_get_response response;
109         int ret;
110
111         ret = gb_operation_sync(svc->connection,
112                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
113                                 &response, sizeof(response));
114         if (ret) {
115                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
116                 return ret;
117         }
118
119         *value = response.rail_count;
120
121         return 0;
122 }
123
124 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
125                 struct gb_svc_pwrmon_rail_names_get_response *response,
126                 size_t bufsize)
127 {
128         int ret;
129
130         ret = gb_operation_sync(svc->connection,
131                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
132                                 response, bufsize);
133         if (ret) {
134                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
135                 return ret;
136         }
137
138         if (response->status != GB_SVC_OP_SUCCESS) {
139                 dev_err(&svc->dev,
140                         "SVC error while getting rail names: %u\n",
141                         response->status);
142                 return -EREMOTEIO;
143         }
144
145         return 0;
146 }
147
148 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
149                                     u8 measurement_type, u32 *value)
150 {
151         struct gb_svc_pwrmon_sample_get_request request;
152         struct gb_svc_pwrmon_sample_get_response response;
153         int ret;
154
155         request.rail_id = rail_id;
156         request.measurement_type = measurement_type;
157
158         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
159                                 &request, sizeof(request),
160                                 &response, sizeof(response));
161         if (ret) {
162                 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
163                 return ret;
164         }
165
166         if (response.result) {
167                 dev_err(&svc->dev,
168                         "UniPro error while getting rail power sample (%d %d): %d\n",
169                         rail_id, measurement_type, response.result);
170                 switch (response.result) {
171                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
172                         return -EINVAL;
173                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
174                         return -ENOMSG;
175                 default:
176                         return -EREMOTEIO;
177                 }
178         }
179
180         *value = le32_to_cpu(response.measurement);
181
182         return 0;
183 }
184
185 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
186                                   u8 measurement_type, u32 *value)
187 {
188         struct gb_svc_pwrmon_intf_sample_get_request request;
189         struct gb_svc_pwrmon_intf_sample_get_response response;
190         int ret;
191
192         request.intf_id = intf_id;
193         request.measurement_type = measurement_type;
194
195         ret = gb_operation_sync(svc->connection,
196                                 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
197                                 &request, sizeof(request),
198                                 &response, sizeof(response));
199         if (ret) {
200                 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
201                 return ret;
202         }
203
204         if (response.result) {
205                 dev_err(&svc->dev,
206                         "UniPro error while getting intf power sample (%d %d): %d\n",
207                         intf_id, measurement_type, response.result);
208                 switch (response.result) {
209                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
210                         return -EINVAL;
211                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
212                         return -ENOMSG;
213                 default:
214                         return -EREMOTEIO;
215                 }
216         }
217
218         *value = le32_to_cpu(response.measurement);
219
220         return 0;
221 }
222
/* sysfs attributes exposed on the SVC device. */
static struct attribute *svc_attrs[] = {
        &dev_attr_endo_id.attr,
        &dev_attr_ap_intf_id.attr,
        &dev_attr_intf_eject.attr,
        &dev_attr_watchdog.attr,
        NULL,
};
ATTRIBUTE_GROUPS(svc);
231
232 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
233 {
234         struct gb_svc_intf_device_id_request request;
235
236         request.intf_id = intf_id;
237         request.device_id = device_id;
238
239         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
240                                  &request, sizeof(request), NULL, 0);
241 }
242
243 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
244 {
245         struct gb_svc_intf_eject_request request;
246         int ret;
247
248         request.intf_id = intf_id;
249
250         /*
251          * The pulse width for module release in svc is long so we need to
252          * increase the timeout so the operation will not return to soon.
253          */
254         ret = gb_operation_sync_timeout(svc->connection,
255                                         GB_SVC_TYPE_INTF_EJECT, &request,
256                                         sizeof(request), NULL, 0,
257                                         SVC_INTF_EJECT_TIMEOUT);
258         if (ret) {
259                 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
260                 return ret;
261         }
262
263         return 0;
264 }
265
266 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
267 {
268         struct gb_svc_intf_vsys_request request;
269         struct gb_svc_intf_vsys_response response;
270         int type, ret;
271
272         request.intf_id = intf_id;
273
274         if (enable)
275                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
276         else
277                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
278
279         ret = gb_operation_sync(svc->connection, type,
280                         &request, sizeof(request),
281                         &response, sizeof(response));
282         if (ret < 0)
283                 return ret;
284         if (response.result_code != GB_SVC_INTF_VSYS_OK)
285                 return -EREMOTEIO;
286         return 0;
287 }
288
289 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
290 {
291         struct gb_svc_intf_refclk_request request;
292         struct gb_svc_intf_refclk_response response;
293         int type, ret;
294
295         request.intf_id = intf_id;
296
297         if (enable)
298                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
299         else
300                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
301
302         ret = gb_operation_sync(svc->connection, type,
303                         &request, sizeof(request),
304                         &response, sizeof(response));
305         if (ret < 0)
306                 return ret;
307         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
308                 return -EREMOTEIO;
309         return 0;
310 }
311
312 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
313 {
314         struct gb_svc_intf_unipro_request request;
315         struct gb_svc_intf_unipro_response response;
316         int type, ret;
317
318         request.intf_id = intf_id;
319
320         if (enable)
321                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
322         else
323                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
324
325         ret = gb_operation_sync(svc->connection, type,
326                         &request, sizeof(request),
327                         &response, sizeof(response));
328         if (ret < 0)
329                 return ret;
330         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
331                 return -EREMOTEIO;
332         return 0;
333 }
334
335 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
336 {
337         struct gb_svc_intf_activate_request request;
338         struct gb_svc_intf_activate_response response;
339         int ret;
340
341         request.intf_id = intf_id;
342
343         ret = gb_operation_sync_timeout(svc->connection,
344                         GB_SVC_TYPE_INTF_ACTIVATE,
345                         &request, sizeof(request),
346                         &response, sizeof(response),
347                         SVC_INTF_ACTIVATE_TIMEOUT);
348         if (ret < 0)
349                 return ret;
350         if (response.status != GB_SVC_OP_SUCCESS) {
351                 dev_err(&svc->dev, "failed to activate interface %u: %u\n",
352                                 intf_id, response.status);
353                 return -EREMOTEIO;
354         }
355
356         *intf_type = response.intf_type;
357
358         return 0;
359 }
360
361 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
362                         u32 *value)
363 {
364         struct gb_svc_dme_peer_get_request request;
365         struct gb_svc_dme_peer_get_response response;
366         u16 result;
367         int ret;
368
369         request.intf_id = intf_id;
370         request.attr = cpu_to_le16(attr);
371         request.selector = cpu_to_le16(selector);
372
373         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
374                                 &request, sizeof(request),
375                                 &response, sizeof(response));
376         if (ret) {
377                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
378                                 intf_id, attr, selector, ret);
379                 return ret;
380         }
381
382         result = le16_to_cpu(response.result_code);
383         if (result) {
384                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
385                                 intf_id, attr, selector, result);
386                 return -EREMOTEIO;
387         }
388
389         if (value)
390                 *value = le32_to_cpu(response.attr_value);
391
392         return 0;
393 }
394 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
395
396 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
397                         u32 value)
398 {
399         struct gb_svc_dme_peer_set_request request;
400         struct gb_svc_dme_peer_set_response response;
401         u16 result;
402         int ret;
403
404         request.intf_id = intf_id;
405         request.attr = cpu_to_le16(attr);
406         request.selector = cpu_to_le16(selector);
407         request.value = cpu_to_le32(value);
408
409         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
410                                 &request, sizeof(request),
411                                 &response, sizeof(response));
412         if (ret) {
413                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
414                                 intf_id, attr, selector, value, ret);
415                 return ret;
416         }
417
418         result = le16_to_cpu(response.result_code);
419         if (result) {
420                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
421                                 intf_id, attr, selector, value, result);
422                 return -EREMOTEIO;
423         }
424
425         return 0;
426 }
427 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
428
429 int gb_svc_connection_create(struct gb_svc *svc,
430                                 u8 intf1_id, u16 cport1_id,
431                                 u8 intf2_id, u16 cport2_id,
432                                 u8 cport_flags)
433 {
434         struct gb_svc_conn_create_request request;
435
436         request.intf1_id = intf1_id;
437         request.cport1_id = cpu_to_le16(cport1_id);
438         request.intf2_id = intf2_id;
439         request.cport2_id = cpu_to_le16(cport2_id);
440         request.tc = 0;         /* TC0 */
441         request.flags = cport_flags;
442
443         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
444                                  &request, sizeof(request), NULL, 0);
445 }
446 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
447
448 void gb_svc_connection_quiescing(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
449                                         u8 intf2_id, u16 cport2_id)
450 {
451         struct gb_svc_conn_quiescing_request request;
452         struct gb_svc_conn_quiescing_response response;
453         int ret;
454
455         dev_dbg(&svc->dev, "%s - (%u:%u %u:%u)\n", __func__,
456                                 intf1_id, cport1_id, intf2_id, cport2_id);
457
458         request.intf1_id = intf1_id;
459         request.cport1_id = cpu_to_le16(cport1_id);
460         request.intf2_id = intf2_id;
461         request.cport2_id = cpu_to_le16(cport2_id);
462
463         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_QUIESCING,
464                                  &request, sizeof(request),
465                                  &response, sizeof(response));
466         if (ret < 0)
467                 return;
468         if (response.status != GB_SVC_OP_SUCCESS) {
469                 dev_err(&svc->dev, "quiescing connection failed (%u:%u %u:%u): %u\n",
470                                 intf1_id, cport1_id, intf2_id, cport2_id,
471                                 response.status);
472                 return;
473         }
474
475         return;
476 }
477 EXPORT_SYMBOL_GPL(gb_svc_connection_quiescing);
478
479 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
480                                u8 intf2_id, u16 cport2_id)
481 {
482         struct gb_svc_conn_destroy_request request;
483         struct gb_connection *connection = svc->connection;
484         int ret;
485
486         request.intf1_id = intf1_id;
487         request.cport1_id = cpu_to_le16(cport1_id);
488         request.intf2_id = intf2_id;
489         request.cport2_id = cpu_to_le16(cport2_id);
490
491         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
492                                 &request, sizeof(request), NULL, 0);
493         if (ret) {
494                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
495                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
496         }
497 }
498 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
499
500 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
501                            u32 strobe_delay, u32 refclk)
502 {
503         struct gb_connection *connection = svc->connection;
504         struct gb_svc_timesync_enable_request request;
505
506         request.count = count;
507         request.frame_time = cpu_to_le64(frame_time);
508         request.strobe_delay = cpu_to_le32(strobe_delay);
509         request.refclk = cpu_to_le32(refclk);
510         return gb_operation_sync(connection,
511                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
512                                  &request, sizeof(request), NULL, 0);
513 }
514 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
515
516 int gb_svc_timesync_disable(struct gb_svc *svc)
517 {
518         struct gb_connection *connection = svc->connection;
519
520         return gb_operation_sync(connection,
521                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
522                                  NULL, 0, NULL, 0);
523 }
524 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
525
526 int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
527 {
528         struct gb_connection *connection = svc->connection;
529         struct gb_svc_timesync_authoritative_response response;
530         int ret, i;
531
532         ret = gb_operation_sync(connection,
533                                 GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
534                                 &response, sizeof(response));
535         if (ret < 0)
536                 return ret;
537
538         for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
539                 frame_time[i] = le64_to_cpu(response.frame_time[i]);
540         return 0;
541 }
542 EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
543
544 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
545 {
546         struct gb_connection *connection = svc->connection;
547         struct gb_svc_timesync_ping_response response;
548         int ret;
549
550         ret = gb_operation_sync(connection,
551                                 GB_SVC_TYPE_TIMESYNC_PING,
552                                 NULL, 0,
553                                 &response, sizeof(response));
554         if (ret < 0)
555                 return ret;
556
557         *frame_time = le64_to_cpu(response.frame_time);
558         return 0;
559 }
560 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
561
562 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
563 {
564         struct gb_connection *connection = svc->connection;
565         struct gb_svc_timesync_wake_pins_acquire_request request;
566
567         request.strobe_mask = cpu_to_le32(strobe_mask);
568         return gb_operation_sync(connection,
569                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
570                                  &request, sizeof(request),
571                                  NULL, 0);
572 }
573 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
574
575 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
576 {
577         struct gb_connection *connection = svc->connection;
578
579         return gb_operation_sync(connection,
580                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
581                                  NULL, 0, NULL, 0);
582 }
583 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
584
585 /* Creates bi-directional routes between the devices */
586 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
587                                u8 intf2_id, u8 dev2_id)
588 {
589         struct gb_svc_route_create_request request;
590
591         request.intf1_id = intf1_id;
592         request.dev1_id = dev1_id;
593         request.intf2_id = intf2_id;
594         request.dev2_id = dev2_id;
595
596         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
597                                  &request, sizeof(request), NULL, 0);
598 }
599
600 /* Destroys bi-directional routes between the devices */
601 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
602 {
603         struct gb_svc_route_destroy_request request;
604         int ret;
605
606         request.intf1_id = intf1_id;
607         request.intf2_id = intf2_id;
608
609         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
610                                 &request, sizeof(request), NULL, 0);
611         if (ret) {
612                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
613                                 intf1_id, intf2_id, ret);
614         }
615 }
616
617 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
618                                u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
619                                u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
620                                u8 flags, u32 quirks)
621 {
622         struct gb_svc_intf_set_pwrm_request request;
623         struct gb_svc_intf_set_pwrm_response response;
624         int ret;
625
626         request.intf_id = intf_id;
627         request.hs_series = hs_series;
628         request.tx_mode = tx_mode;
629         request.tx_gear = tx_gear;
630         request.tx_nlanes = tx_nlanes;
631         request.rx_mode = rx_mode;
632         request.rx_gear = rx_gear;
633         request.rx_nlanes = rx_nlanes;
634         request.flags = flags;
635         request.quirks = cpu_to_le32(quirks);
636
637         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
638                                 &request, sizeof(request),
639                                 &response, sizeof(response));
640         if (ret < 0)
641                 return ret;
642
643         return le16_to_cpu(response.result_code);
644 }
645 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
646
/*
 * Ping the SVC; used by the watchdog to check liveness.  Uses twice the
 * default operation timeout before declaring the SVC unresponsive.
 */
int gb_svc_ping(struct gb_svc *svc)
{
        return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
                                         NULL, 0, NULL, 0,
                                         GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
EXPORT_SYMBOL_GPL(gb_svc_ping);
654
655 static int gb_svc_version_request(struct gb_operation *op)
656 {
657         struct gb_connection *connection = op->connection;
658         struct gb_svc *svc = gb_connection_get_data(connection);
659         struct gb_svc_version_request *request;
660         struct gb_svc_version_response *response;
661
662         if (op->request->payload_size < sizeof(*request)) {
663                 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
664                                 op->request->payload_size,
665                                 sizeof(*request));
666                 return -EINVAL;
667         }
668
669         request = op->request->payload;
670
671         if (request->major > GB_SVC_VERSION_MAJOR) {
672                 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
673                                 request->major, GB_SVC_VERSION_MAJOR);
674                 return -ENOTSUPP;
675         }
676
677         svc->protocol_major = request->major;
678         svc->protocol_minor = request->minor;
679
680         if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
681                 return -ENOMEM;
682
683         response = op->response->payload;
684         response->major = svc->protocol_major;
685         response->minor = svc->protocol_minor;
686
687         return 0;
688 }
689
690 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
691                                         size_t len, loff_t *offset)
692 {
693         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
694         struct gb_svc *svc = pwrmon_rails->svc;
695         int ret, desc;
696         u32 value;
697         char buff[16];
698
699         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
700                                        GB_SVC_PWRMON_TYPE_VOL, &value);
701         if (ret) {
702                 dev_err(&svc->dev,
703                         "failed to get voltage sample %u: %d\n",
704                         pwrmon_rails->id, ret);
705                 return ret;
706         }
707
708         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
709
710         return simple_read_from_buffer(buf, len, offset, buff, desc);
711 }
712
713 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
714                                         size_t len, loff_t *offset)
715 {
716         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
717         struct gb_svc *svc = pwrmon_rails->svc;
718         int ret, desc;
719         u32 value;
720         char buff[16];
721
722         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
723                                        GB_SVC_PWRMON_TYPE_CURR, &value);
724         if (ret) {
725                 dev_err(&svc->dev,
726                         "failed to get current sample %u: %d\n",
727                         pwrmon_rails->id, ret);
728                 return ret;
729         }
730
731         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
732
733         return simple_read_from_buffer(buf, len, offset, buff, desc);
734 }
735
736 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
737                                       size_t len, loff_t *offset)
738 {
739         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
740         struct gb_svc *svc = pwrmon_rails->svc;
741         int ret, desc;
742         u32 value;
743         char buff[16];
744
745         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
746                                        GB_SVC_PWRMON_TYPE_PWR, &value);
747         if (ret) {
748                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
749                         pwrmon_rails->id, ret);
750                 return ret;
751         }
752
753         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
754
755         return simple_read_from_buffer(buf, len, offset, buff, desc);
756 }
757
/* debugfs file operations for the per-rail sample files (read-only). */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
        .read           = pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
        .read           = pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
        .read           = pwr_debugfs_power_read,
};
769
/*
 * Create the "pwrmon" debugfs hierarchy: one directory per power rail,
 * each exposing voltage_now/current_now/power_now files.
 *
 * Best-effort: on any failure the partially created directory is
 * removed and the function returns silently (debugfs is optional).
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
        int i;
        size_t bufsize;
        struct dentry *dent;
        struct gb_svc_pwrmon_rail_names_get_response *rail_names;
        u8 rail_count;

        dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
        if (IS_ERR_OR_NULL(dent))
                return;

        if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
                goto err_pwrmon_debugfs;

        /* Sanity-check the remote-supplied count before sizing buffers. */
        if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
                goto err_pwrmon_debugfs;

        /* Response header plus one fixed-size name slot per rail. */
        bufsize = sizeof(*rail_names) +
                GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

        rail_names = kzalloc(bufsize, GFP_KERNEL);
        if (!rail_names)
                goto err_pwrmon_debugfs;

        svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
                                    GFP_KERNEL);
        if (!svc->pwrmon_rails)
                goto err_pwrmon_debugfs_free;

        if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
                goto err_pwrmon_debugfs_free;

        for (i = 0; i < rail_count; i++) {
                struct dentry *dir;
                struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
                char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

                /*
                 * NOTE(review): assumes each name slot sent by the SVC is
                 * NUL-terminated within its GB_SVC_PWRMON_RAIL_NAME_BUFSIZE
                 * bytes; the buffer is kzalloc'd, but a full response could
                 * leave a slot unterminated -- confirm the firmware contract.
                 */
                snprintf(fname, sizeof(fname), "%s",
                         (char *)&rail_names->name[i]);

                rail->id = i;
                rail->svc = svc;

                dir = debugfs_create_dir(fname, dent);
                debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_voltage_fops);
                debugfs_create_file("current_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_current_fops);
                debugfs_create_file("power_now", S_IRUGO, dir, rail,
                                    &pwrmon_debugfs_power_fops);
        }

        kfree(rail_names);
        return;

err_pwrmon_debugfs_free:
        kfree(rail_names);
        kfree(svc->pwrmon_rails);
        svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
        debugfs_remove(dent);
}
834
835 static void gb_svc_debugfs_init(struct gb_svc *svc)
836 {
837         svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
838                                                  gb_debugfs_get());
839         gb_svc_pwrmon_debugfs_init(svc);
840 }
841
/* Tear down the debugfs tree and free the per-rail bookkeeping array. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
        debugfs_remove_recursive(svc->debugfs_dentry);
        kfree(svc->pwrmon_rails);
        svc->pwrmon_rails = NULL;   /* guard against later double-free */
}
848
/*
 * Handle the SVC hello request: record the Endo and AP interface ids,
 * then bring up the SVC device, its input device, watchdog, debugfs
 * tree and timesync registration, in that order.  On failure, already
 * registered pieces are unwound in reverse order.
 */
static int gb_svc_hello(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = gb_connection_get_data(connection);
        struct gb_svc_hello_request *hello_request;
        int ret;

        if (op->request->payload_size < sizeof(*hello_request)) {
                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*hello_request));
                return -EINVAL;
        }

        hello_request = op->request->payload;
        svc->endo_id = le16_to_cpu(hello_request->endo_id);
        svc->ap_intf_id = hello_request->interface_id;

        ret = device_add(&svc->dev);
        if (ret) {
                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
                return ret;
        }

        ret = input_register_device(svc->input);
        if (ret) {
                dev_err(&svc->dev, "failed to register input: %d\n", ret);
                device_del(&svc->dev);
                return ret;
        }

        ret = gb_svc_watchdog_create(svc);
        if (ret) {
                dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
                goto err_unregister_device;
        }

        /* debugfs init is best-effort and reports no errors. */
        gb_svc_debugfs_init(svc);

        ret = gb_timesync_svc_add(svc);
        if (ret) {
                dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
                gb_svc_debugfs_exit(svc);
                goto err_unregister_device;
        }

        /* Remaining setup is finished from process context. */
        return gb_svc_queue_deferred_request(op);

err_unregister_device:
        gb_svc_watchdog_destroy(svc);
        input_unregister_device(svc->input);
        device_del(&svc->dev);
        return ret;
}
903
904 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
905                                                         u8 intf_id)
906 {
907         struct gb_host_device *hd = svc->hd;
908         struct gb_module *module;
909         size_t num_interfaces;
910         u8 module_id;
911
912         list_for_each_entry(module, &hd->modules, hd_node) {
913                 module_id = module->module_id;
914                 num_interfaces = module->num_interfaces;
915
916                 if (intf_id >= module_id &&
917                                 intf_id < module_id + num_interfaces) {
918                         return module->interfaces[intf_id - module_id];
919                 }
920         }
921
922         return NULL;
923 }
924
925 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
926 {
927         struct gb_host_device *hd = svc->hd;
928         struct gb_module *module;
929
930         list_for_each_entry(module, &hd->modules, hd_node) {
931                 if (module->module_id == module_id)
932                         return module;
933         }
934
935         return NULL;
936 }
937
/*
 * Deferred part of hello processing, run from the svc workqueue: drop
 * the APBridgeA-Switch link to a slower power mode (see the work-around
 * note below).  Failure is logged but otherwise ignored.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					GB_SVC_UNIPRO_HS_SERIES_A,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					0, 0);

	if (ret)
		dev_warn(&svc->dev,
			"power mode change failed on AP to switch link: %d\n",
			ret);
}
966
/*
 * Deferred handler for a module-inserted event: create and register a
 * gb_module for the reported primary interface id and interface count,
 * and link it on the host device's module list.
 */
static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
			__func__, module_id, num_interfaces, flags);

	/* Warn but carry on: the module is still created below. */
	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
				module_id);
	}

	/* A second insert event for a known module id is unexpected. */
	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
				module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		/* Release the module reference on failure to add. */
		gb_module_put(module);
		return;
	}

	/* Only successfully-added modules enter the lookup list. */
	list_add(&module->hd_node, &hd->modules);
}
1014
/*
 * Deferred handler for a module-removed event: look up the module by its
 * primary interface id, mark it disconnected and tear it down.
 */
static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
				module_id);
		return;
	}

	/*
	 * Flag the module as physically gone before teardown; presumably
	 * lets module teardown avoid talking to absent hardware — confirm
	 * against gb_module_del().
	 */
	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);	/* drop the list's reference */
}
1042
/*
 * Deferred handler for an interface mailbox event: decode the event
 * fields and forward them to the matching interface.
 */
static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
			__func__, intf_id, result_code, mailbox);

	/* Events for unknown interfaces are logged and dropped. */
	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
1070
/*
 * Workqueue handler: dispatch a previously queued SVC request to its
 * deferred processing routine based on the operation type, then drop
 * the operation reference taken in gb_svc_queue_deferred_request() and
 * free the deferred-request wrapper.
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	/* Balance the get in gb_svc_queue_deferred_request(). */
	gb_operation_put(operation);
	kfree(dr);
}
1103
1104 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1105 {
1106         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1107         struct gb_svc_deferred_request *dr;
1108
1109         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1110         if (!dr)
1111                 return -ENOMEM;
1112
1113         gb_operation_get(operation);
1114
1115         dr->operation = operation;
1116         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1117
1118         queue_work(svc->wq, &dr->work);
1119
1120         return 0;
1121 }
1122
/*
 * Handle an interface-reset request from the SVC.  Currently only
 * validates the request; the actual reset is not yet implemented (see
 * the FIXME below), so intf_id is extracted but unused.
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1143
1144 static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
1145 {
1146         switch (key_code) {
1147         case GB_KEYCODE_ARA:
1148                 *code = SVC_KEY_ARA_BUTTON;
1149                 break;
1150         default:
1151                 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
1152                 return -EINVAL;
1153         }
1154
1155         return 0;
1156 }
1157
/*
 * Handle an SVC key event: validate the request, map the keycode, and
 * report a press or release through the svc input device.
 *
 * Returns 0 on success or -EINVAL for malformed/unknown events.
 */
static int gb_svc_key_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_key_event_request *key;
	u16 code;
	u8 event;
	int ret;

	if (request->payload_size < sizeof(*key)) {
		dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*key));
		return -EINVAL;
	}

	key = request->payload;

	ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
	if (ret < 0)
		return ret;

	/* Only press and release events are defined. */
	event = key->key_event;
	if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
		dev_warn(&svc->dev, "unknown key event received: %u\n", event);
		return -EINVAL;
	}

	input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
	input_sync(svc->input);

	return 0;
}
1190
1191 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1192 {
1193         struct gb_svc *svc = gb_connection_get_data(op->connection);
1194         struct gb_svc_module_inserted_request *request;
1195
1196         if (op->request->payload_size < sizeof(*request)) {
1197                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1198                                 op->request->payload_size, sizeof(*request));
1199                 return -EINVAL;
1200         }
1201
1202         request = op->request->payload;
1203
1204         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1205                         request->primary_intf_id);
1206
1207         return gb_svc_queue_deferred_request(op);
1208 }
1209
1210 static int gb_svc_module_removed_recv(struct gb_operation *op)
1211 {
1212         struct gb_svc *svc = gb_connection_get_data(op->connection);
1213         struct gb_svc_module_removed_request *request;
1214
1215         if (op->request->payload_size < sizeof(*request)) {
1216                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1217                                 op->request->payload_size, sizeof(*request));
1218                 return -EINVAL;
1219         }
1220
1221         request = op->request->payload;
1222
1223         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1224                         request->primary_intf_id);
1225
1226         return gb_svc_queue_deferred_request(op);
1227 }
1228
1229 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1230 {
1231         struct gb_svc *svc = gb_connection_get_data(op->connection);
1232         struct gb_svc_intf_mailbox_event_request *request;
1233
1234         if (op->request->payload_size < sizeof(*request)) {
1235                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1236                                 op->request->payload_size, sizeof(*request));
1237                 return -EINVAL;
1238         }
1239
1240         request = op->request->payload;
1241
1242         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1243
1244         return gb_svc_queue_deferred_request(op);
1245 }
1246
/*
 * Top-level handler for incoming SVC requests.  First enforces the
 * required startup ordering via svc->state, then dispatches to the
 * per-type handler.  Returns 0 on success or a negative errno.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	/* Dispatch; state only advances after a successful handler. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1311
1312 static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
1313 {
1314         struct input_dev *input_dev;
1315
1316         input_dev = input_allocate_device();
1317         if (!input_dev)
1318                 return ERR_PTR(-ENOMEM);
1319
1320         input_dev->name = dev_name(&svc->dev);
1321         svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
1322                                     input_dev->name);
1323         if (!svc->input_phys)
1324                 goto err_free_input;
1325
1326         input_dev->phys = svc->input_phys;
1327         input_dev->dev.parent = &svc->dev;
1328
1329         input_set_drvdata(input_dev, svc);
1330
1331         input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
1332
1333         return input_dev;
1334
1335 err_free_input:
1336         input_free_device(svc->input);
1337         return ERR_PTR(-ENOMEM);
1338 }
1339
/*
 * Device release callback for the svc device: frees all remaining SVC
 * state once the last reference is dropped (see gb_svc_put()).
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may be absent if gb_svc_create() failed mid-way. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
1351
/* Device type for svc devices; release frees the containing gb_svc. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1356
/*
 * Allocate and initialise an SVC for the given host device: workqueue,
 * device (initialised but not yet added), input device and static SVC
 * connection.  The device itself is registered later from
 * gb_svc_hello().
 *
 * Returns the new SVC or NULL on failure.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* Ordered, single-threaded queue for deferred request handling. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->input = gb_svc_input_create(svc);
	if (IS_ERR(svc->input)) {
		dev_err(&svc->dev, "failed to create input device: %ld\n",
			PTR_ERR(svc->input));
		goto err_put_device;
	}

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
				PTR_ERR(svc->connection));
		goto err_free_input;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_free_input:
	input_free_device(svc->input);
err_put_device:
	/* Drops the device_initialize() ref; gb_svc_release() cleans up. */
	put_device(&svc->dev);
	return NULL;
}
1409
1410 int gb_svc_add(struct gb_svc *svc)
1411 {
1412         int ret;
1413
1414         /*
1415          * The SVC protocol is currently driven by the SVC, so the SVC device
1416          * is added from the connection request handler when enough
1417          * information has been received.
1418          */
1419         ret = gb_connection_enable(svc->connection);
1420         if (ret)
1421                 return ret;
1422
1423         return 0;
1424 }
1425
1426 static void gb_svc_remove_modules(struct gb_svc *svc)
1427 {
1428         struct gb_host_device *hd = svc->hd;
1429         struct gb_module *module, *tmp;
1430
1431         list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1432                 gb_module_del(module);
1433                 list_del(&module->hd_node);
1434                 gb_module_put(module);
1435         }
1436 }
1437
/*
 * Shut the SVC down: disable the connection, unwind the registrations
 * made by gb_svc_hello() (if they happened), drain the workqueue and
 * remove all modules.  Does not free the SVC; see gb_svc_put().
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop new requests before tearing anything down. */
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_timesync_svc_remove(svc);
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Let any queued deferred requests finish before module removal. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);
}
1458
/*
 * Drop the caller's reference to the SVC device; the final put frees
 * everything via gb_svc_release().
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}