c5aedd55e90d31d5c16c40e149cb003f9e8b77f6
[cascardo/linux.git] / drivers / staging / greybus / svc.c
1 /*
2  * SVC Greybus driver.
3  *
4  * Copyright 2015 Google Inc.
5  * Copyright 2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/debugfs.h>
11 #include <linux/workqueue.h>
12
13 #include "greybus.h"
14
15 #define SVC_INTF_EJECT_TIMEOUT          9000
16 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
17 #define SVC_INTF_RESUME_TIMEOUT 3000
18
/*
 * Context for an SVC request whose handling is deferred to a workqueue;
 * the work item keeps a reference to the originating operation.
 */
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};
23
24
25 static int gb_svc_queue_deferred_request(struct gb_operation *operation);
26
27 static ssize_t endo_id_show(struct device *dev,
28                         struct device_attribute *attr, char *buf)
29 {
30         struct gb_svc *svc = to_gb_svc(dev);
31
32         return sprintf(buf, "0x%04x\n", svc->endo_id);
33 }
34 static DEVICE_ATTR_RO(endo_id);
35
36 static ssize_t ap_intf_id_show(struct device *dev,
37                         struct device_attribute *attr, char *buf)
38 {
39         struct gb_svc *svc = to_gb_svc(dev);
40
41         return sprintf(buf, "%u\n", svc->ap_intf_id);
42 }
43 static DEVICE_ATTR_RO(ap_intf_id);
44
45 // FIXME
46 // This is a hack, we need to do this "right" and clean the interface up
47 // properly, not just forcibly yank the thing out of the system and hope for the
48 // best.  But for now, people want their modules to come out without having to
49 // throw the thing to the ground or get out a screwdriver.
50 static ssize_t intf_eject_store(struct device *dev,
51                                 struct device_attribute *attr, const char *buf,
52                                 size_t len)
53 {
54         struct gb_svc *svc = to_gb_svc(dev);
55         unsigned short intf_id;
56         int ret;
57
58         ret = kstrtou16(buf, 10, &intf_id);
59         if (ret < 0)
60                 return ret;
61
62         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
63
64         ret = gb_svc_intf_eject(svc, intf_id);
65         if (ret < 0)
66                 return ret;
67
68         return len;
69 }
70 static DEVICE_ATTR_WO(intf_eject);
71
72 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
73                              char *buf)
74 {
75         struct gb_svc *svc = to_gb_svc(dev);
76
77         return sprintf(buf, "%s\n",
78                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
79 }
80
81 static ssize_t watchdog_store(struct device *dev,
82                               struct device_attribute *attr, const char *buf,
83                               size_t len)
84 {
85         struct gb_svc *svc = to_gb_svc(dev);
86         int retval;
87         bool user_request;
88
89         retval = strtobool(buf, &user_request);
90         if (retval)
91                 return retval;
92
93         if (user_request)
94                 retval = gb_svc_watchdog_enable(svc);
95         else
96                 retval = gb_svc_watchdog_disable(svc);
97         if (retval)
98                 return retval;
99         return len;
100 }
101 static DEVICE_ATTR_RW(watchdog);
102
103 static ssize_t watchdog_action_show(struct device *dev,
104                                     struct device_attribute *attr, char *buf)
105 {
106         struct gb_svc *svc = to_gb_svc(dev);
107
108         if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
109                 return sprintf(buf, "panic\n");
110         else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
111                 return sprintf(buf, "reset\n");
112
113         return -EINVAL;
114 }
115
116 static ssize_t watchdog_action_store(struct device *dev,
117                                      struct device_attribute *attr,
118                                      const char *buf, size_t len)
119 {
120         struct gb_svc *svc = to_gb_svc(dev);
121
122         if (sysfs_streq(buf, "panic"))
123                 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
124         else if (sysfs_streq(buf, "reset"))
125                 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
126         else
127                 return -EINVAL;
128
129         return len;
130 }
131 static DEVICE_ATTR_RW(watchdog_action);
132
133 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
134 {
135         struct gb_svc_pwrmon_rail_count_get_response response;
136         int ret;
137
138         ret = gb_operation_sync(svc->connection,
139                                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
140                                 &response, sizeof(response));
141         if (ret) {
142                 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
143                 return ret;
144         }
145
146         *value = response.rail_count;
147
148         return 0;
149 }
150
151 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
152                 struct gb_svc_pwrmon_rail_names_get_response *response,
153                 size_t bufsize)
154 {
155         int ret;
156
157         ret = gb_operation_sync(svc->connection,
158                                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
159                                 response, bufsize);
160         if (ret) {
161                 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
162                 return ret;
163         }
164
165         if (response->status != GB_SVC_OP_SUCCESS) {
166                 dev_err(&svc->dev,
167                         "SVC error while getting rail names: %u\n",
168                         response->status);
169                 return -EREMOTEIO;
170         }
171
172         return 0;
173 }
174
175 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
176                                     u8 measurement_type, u32 *value)
177 {
178         struct gb_svc_pwrmon_sample_get_request request;
179         struct gb_svc_pwrmon_sample_get_response response;
180         int ret;
181
182         request.rail_id = rail_id;
183         request.measurement_type = measurement_type;
184
185         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
186                                 &request, sizeof(request),
187                                 &response, sizeof(response));
188         if (ret) {
189                 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
190                 return ret;
191         }
192
193         if (response.result) {
194                 dev_err(&svc->dev,
195                         "UniPro error while getting rail power sample (%d %d): %d\n",
196                         rail_id, measurement_type, response.result);
197                 switch (response.result) {
198                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
199                         return -EINVAL;
200                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
201                         return -ENOMSG;
202                 default:
203                         return -EREMOTEIO;
204                 }
205         }
206
207         *value = le32_to_cpu(response.measurement);
208
209         return 0;
210 }
211
212 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
213                                   u8 measurement_type, u32 *value)
214 {
215         struct gb_svc_pwrmon_intf_sample_get_request request;
216         struct gb_svc_pwrmon_intf_sample_get_response response;
217         int ret;
218
219         request.intf_id = intf_id;
220         request.measurement_type = measurement_type;
221
222         ret = gb_operation_sync(svc->connection,
223                                 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
224                                 &request, sizeof(request),
225                                 &response, sizeof(response));
226         if (ret) {
227                 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
228                 return ret;
229         }
230
231         if (response.result) {
232                 dev_err(&svc->dev,
233                         "UniPro error while getting intf power sample (%d %d): %d\n",
234                         intf_id, measurement_type, response.result);
235                 switch (response.result) {
236                 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
237                         return -EINVAL;
238                 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
239                         return -ENOMSG;
240                 default:
241                         return -EREMOTEIO;
242                 }
243         }
244
245         *value = le32_to_cpu(response.measurement);
246
247         return 0;
248 }
249
/* sysfs attributes exposed on the SVC device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
259
260 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
261 {
262         struct gb_svc_intf_device_id_request request;
263
264         request.intf_id = intf_id;
265         request.device_id = device_id;
266
267         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
268                                  &request, sizeof(request), NULL, 0);
269 }
270
/*
 * Ask the SVC to physically eject interface @intf_id.
 * Returns 0 on success or a negative errno on failure.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
293
294 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
295 {
296         struct gb_svc_intf_vsys_request request;
297         struct gb_svc_intf_vsys_response response;
298         int type, ret;
299
300         request.intf_id = intf_id;
301
302         if (enable)
303                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
304         else
305                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
306
307         ret = gb_operation_sync(svc->connection, type,
308                         &request, sizeof(request),
309                         &response, sizeof(response));
310         if (ret < 0)
311                 return ret;
312         if (response.result_code != GB_SVC_INTF_VSYS_OK)
313                 return -EREMOTEIO;
314         return 0;
315 }
316
317 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
318 {
319         struct gb_svc_intf_refclk_request request;
320         struct gb_svc_intf_refclk_response response;
321         int type, ret;
322
323         request.intf_id = intf_id;
324
325         if (enable)
326                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
327         else
328                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
329
330         ret = gb_operation_sync(svc->connection, type,
331                         &request, sizeof(request),
332                         &response, sizeof(response));
333         if (ret < 0)
334                 return ret;
335         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
336                 return -EREMOTEIO;
337         return 0;
338 }
339
340 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
341 {
342         struct gb_svc_intf_unipro_request request;
343         struct gb_svc_intf_unipro_response response;
344         int type, ret;
345
346         request.intf_id = intf_id;
347
348         if (enable)
349                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
350         else
351                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
352
353         ret = gb_operation_sync(svc->connection, type,
354                         &request, sizeof(request),
355                         &response, sizeof(response));
356         if (ret < 0)
357                 return ret;
358         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
359                 return -EREMOTEIO;
360         return 0;
361 }
362
363 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
364 {
365         struct gb_svc_intf_activate_request request;
366         struct gb_svc_intf_activate_response response;
367         int ret;
368
369         request.intf_id = intf_id;
370
371         ret = gb_operation_sync_timeout(svc->connection,
372                         GB_SVC_TYPE_INTF_ACTIVATE,
373                         &request, sizeof(request),
374                         &response, sizeof(response),
375                         SVC_INTF_ACTIVATE_TIMEOUT);
376         if (ret < 0)
377                 return ret;
378         if (response.status != GB_SVC_OP_SUCCESS) {
379                 dev_err(&svc->dev, "failed to activate interface %u: %u\n",
380                                 intf_id, response.status);
381                 return -EREMOTEIO;
382         }
383
384         *intf_type = response.intf_type;
385
386         return 0;
387 }
388
389 int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
390 {
391         struct gb_svc_intf_resume_request request;
392         struct gb_svc_intf_resume_response response;
393         int ret;
394
395         request.intf_id = intf_id;
396
397         ret = gb_operation_sync_timeout(svc->connection,
398                                         GB_SVC_TYPE_INTF_RESUME,
399                                         &request, sizeof(request),
400                                         &response, sizeof(response),
401                                         SVC_INTF_RESUME_TIMEOUT);
402         if (ret < 0) {
403                 dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
404                         intf_id, ret);
405                 return ret;
406         }
407
408         if (response.status != GB_SVC_OP_SUCCESS) {
409                 dev_err(&svc->dev, "failed to resume interface %u: %u\n",
410                         intf_id, response.status);
411                 return -EREMOTEIO;
412         }
413
414         return 0;
415 }
416
417 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
418                         u32 *value)
419 {
420         struct gb_svc_dme_peer_get_request request;
421         struct gb_svc_dme_peer_get_response response;
422         u16 result;
423         int ret;
424
425         request.intf_id = intf_id;
426         request.attr = cpu_to_le16(attr);
427         request.selector = cpu_to_le16(selector);
428
429         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
430                                 &request, sizeof(request),
431                                 &response, sizeof(response));
432         if (ret) {
433                 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
434                                 intf_id, attr, selector, ret);
435                 return ret;
436         }
437
438         result = le16_to_cpu(response.result_code);
439         if (result) {
440                 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
441                                 intf_id, attr, selector, result);
442                 return -EREMOTEIO;
443         }
444
445         if (value)
446                 *value = le32_to_cpu(response.attr_value);
447
448         return 0;
449 }
450 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
451
452 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
453                         u32 value)
454 {
455         struct gb_svc_dme_peer_set_request request;
456         struct gb_svc_dme_peer_set_response response;
457         u16 result;
458         int ret;
459
460         request.intf_id = intf_id;
461         request.attr = cpu_to_le16(attr);
462         request.selector = cpu_to_le16(selector);
463         request.value = cpu_to_le32(value);
464
465         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
466                                 &request, sizeof(request),
467                                 &response, sizeof(response));
468         if (ret) {
469                 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
470                                 intf_id, attr, selector, value, ret);
471                 return ret;
472         }
473
474         result = le16_to_cpu(response.result_code);
475         if (result) {
476                 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
477                                 intf_id, attr, selector, value, result);
478                 return -EREMOTEIO;
479         }
480
481         return 0;
482 }
483 EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
484
485 int gb_svc_connection_create(struct gb_svc *svc,
486                                 u8 intf1_id, u16 cport1_id,
487                                 u8 intf2_id, u16 cport2_id,
488                                 u8 cport_flags)
489 {
490         struct gb_svc_conn_create_request request;
491
492         request.intf1_id = intf1_id;
493         request.cport1_id = cpu_to_le16(cport1_id);
494         request.intf2_id = intf2_id;
495         request.cport2_id = cpu_to_le16(cport2_id);
496         request.tc = 0;         /* TC0 */
497         request.flags = cport_flags;
498
499         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
500                                  &request, sizeof(request), NULL, 0);
501 }
502 EXPORT_SYMBOL_GPL(gb_svc_connection_create);
503
504 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
505                                u8 intf2_id, u16 cport2_id)
506 {
507         struct gb_svc_conn_destroy_request request;
508         struct gb_connection *connection = svc->connection;
509         int ret;
510
511         request.intf1_id = intf1_id;
512         request.cport1_id = cpu_to_le16(cport1_id);
513         request.intf2_id = intf2_id;
514         request.cport2_id = cpu_to_le16(cport2_id);
515
516         ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
517                                 &request, sizeof(request), NULL, 0);
518         if (ret) {
519                 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
520                                 intf1_id, cport1_id, intf2_id, cport2_id, ret);
521         }
522 }
523 EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
524
525 int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
526                            u32 strobe_delay, u32 refclk)
527 {
528         struct gb_connection *connection = svc->connection;
529         struct gb_svc_timesync_enable_request request;
530
531         request.count = count;
532         request.frame_time = cpu_to_le64(frame_time);
533         request.strobe_delay = cpu_to_le32(strobe_delay);
534         request.refclk = cpu_to_le32(refclk);
535         return gb_operation_sync(connection,
536                                  GB_SVC_TYPE_TIMESYNC_ENABLE,
537                                  &request, sizeof(request), NULL, 0);
538 }
539 EXPORT_SYMBOL_GPL(gb_svc_timesync_enable);
540
541 int gb_svc_timesync_disable(struct gb_svc *svc)
542 {
543         struct gb_connection *connection = svc->connection;
544
545         return gb_operation_sync(connection,
546                                  GB_SVC_TYPE_TIMESYNC_DISABLE,
547                                  NULL, 0, NULL, 0);
548 }
549 EXPORT_SYMBOL_GPL(gb_svc_timesync_disable);
550
551 int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
552 {
553         struct gb_connection *connection = svc->connection;
554         struct gb_svc_timesync_authoritative_response response;
555         int ret, i;
556
557         ret = gb_operation_sync(connection,
558                                 GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
559                                 &response, sizeof(response));
560         if (ret < 0)
561                 return ret;
562
563         for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
564                 frame_time[i] = le64_to_cpu(response.frame_time[i]);
565         return 0;
566 }
567 EXPORT_SYMBOL_GPL(gb_svc_timesync_authoritative);
568
569 int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
570 {
571         struct gb_connection *connection = svc->connection;
572         struct gb_svc_timesync_ping_response response;
573         int ret;
574
575         ret = gb_operation_sync(connection,
576                                 GB_SVC_TYPE_TIMESYNC_PING,
577                                 NULL, 0,
578                                 &response, sizeof(response));
579         if (ret < 0)
580                 return ret;
581
582         *frame_time = le64_to_cpu(response.frame_time);
583         return 0;
584 }
585 EXPORT_SYMBOL_GPL(gb_svc_timesync_ping);
586
587 int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
588 {
589         struct gb_connection *connection = svc->connection;
590         struct gb_svc_timesync_wake_pins_acquire_request request;
591
592         request.strobe_mask = cpu_to_le32(strobe_mask);
593         return gb_operation_sync(connection,
594                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
595                                  &request, sizeof(request),
596                                  NULL, 0);
597 }
598 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_acquire);
599
600 int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
601 {
602         struct gb_connection *connection = svc->connection;
603
604         return gb_operation_sync(connection,
605                                  GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
606                                  NULL, 0, NULL, 0);
607 }
608 EXPORT_SYMBOL_GPL(gb_svc_timesync_wake_pins_release);
609
610 /* Creates bi-directional routes between the devices */
611 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
612                                u8 intf2_id, u8 dev2_id)
613 {
614         struct gb_svc_route_create_request request;
615
616         request.intf1_id = intf1_id;
617         request.dev1_id = dev1_id;
618         request.intf2_id = intf2_id;
619         request.dev2_id = dev2_id;
620
621         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
622                                  &request, sizeof(request), NULL, 0);
623 }
624
625 /* Destroys bi-directional routes between the devices */
626 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
627 {
628         struct gb_svc_route_destroy_request request;
629         int ret;
630
631         request.intf1_id = intf1_id;
632         request.intf2_id = intf2_id;
633
634         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
635                                 &request, sizeof(request), NULL, 0);
636         if (ret) {
637                 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
638                                 intf1_id, intf2_id, ret);
639         }
640 }
641
642 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
643                                u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
644                                u8 tx_amplitude, u8 tx_hs_equalizer,
645                                u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
646                                u8 flags, u32 quirks,
647                                struct gb_svc_l2_timer_cfg *local,
648                                struct gb_svc_l2_timer_cfg *remote)
649 {
650         struct gb_svc_intf_set_pwrm_request request;
651         struct gb_svc_intf_set_pwrm_response response;
652         int ret;
653         u16 result_code;
654
655         memset(&request, 0, sizeof(request));
656
657         request.intf_id = intf_id;
658         request.hs_series = hs_series;
659         request.tx_mode = tx_mode;
660         request.tx_gear = tx_gear;
661         request.tx_nlanes = tx_nlanes;
662         request.tx_amplitude = tx_amplitude;
663         request.tx_hs_equalizer = tx_hs_equalizer;
664         request.rx_mode = rx_mode;
665         request.rx_gear = rx_gear;
666         request.rx_nlanes = rx_nlanes;
667         request.flags = flags;
668         request.quirks = cpu_to_le32(quirks);
669         if (local)
670                 request.local_l2timerdata = *local;
671         if (remote)
672                 request.remote_l2timerdata = *remote;
673
674         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
675                                 &request, sizeof(request),
676                                 &response, sizeof(response));
677         if (ret < 0)
678                 return ret;
679
680         result_code = response.result_code;
681         if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
682                 dev_err(&svc->dev, "set power mode = %d\n", result_code);
683                 return -EIO;
684         }
685
686         return 0;
687 }
688 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
689
690 int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
691 {
692         struct gb_svc_intf_set_pwrm_request request;
693         struct gb_svc_intf_set_pwrm_response response;
694         int ret;
695         u16 result_code;
696
697         memset(&request, 0, sizeof(request));
698
699         request.intf_id = intf_id;
700         request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
701         request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
702         request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
703
704         ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
705                                 &request, sizeof(request),
706                                 &response, sizeof(response));
707         if (ret < 0) {
708                 dev_err(&svc->dev,
709                         "failed to send set power mode operation to interface %u: %d\n",
710                         intf_id, ret);
711                 return ret;
712         }
713
714         result_code = response.result_code;
715         if (result_code != GB_SVC_SETPWRM_PWR_OK) {
716                 dev_err(&svc->dev,
717                         "failed to hibernate the link for interface %u: %u\n",
718                         intf_id, result_code);
719                 return -EIO;
720         }
721
722         return 0;
723 }
724
725 int gb_svc_ping(struct gb_svc *svc)
726 {
727         return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
728                                          NULL, 0, NULL, 0,
729                                          GB_OPERATION_TIMEOUT_DEFAULT * 2);
730 }
731 EXPORT_SYMBOL_GPL(gb_svc_ping);
732
733 static int gb_svc_version_request(struct gb_operation *op)
734 {
735         struct gb_connection *connection = op->connection;
736         struct gb_svc *svc = gb_connection_get_data(connection);
737         struct gb_svc_version_request *request;
738         struct gb_svc_version_response *response;
739
740         if (op->request->payload_size < sizeof(*request)) {
741                 dev_err(&svc->dev, "short version request (%zu < %zu)\n",
742                                 op->request->payload_size,
743                                 sizeof(*request));
744                 return -EINVAL;
745         }
746
747         request = op->request->payload;
748
749         if (request->major > GB_SVC_VERSION_MAJOR) {
750                 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
751                                 request->major, GB_SVC_VERSION_MAJOR);
752                 return -ENOTSUPP;
753         }
754
755         svc->protocol_major = request->major;
756         svc->protocol_minor = request->minor;
757
758         if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
759                 return -ENOMEM;
760
761         response = op->response->payload;
762         response->major = svc->protocol_major;
763         response->minor = svc->protocol_minor;
764
765         return 0;
766 }
767
768 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
769                                         size_t len, loff_t *offset)
770 {
771         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
772         struct gb_svc *svc = pwrmon_rails->svc;
773         int ret, desc;
774         u32 value;
775         char buff[16];
776
777         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
778                                        GB_SVC_PWRMON_TYPE_VOL, &value);
779         if (ret) {
780                 dev_err(&svc->dev,
781                         "failed to get voltage sample %u: %d\n",
782                         pwrmon_rails->id, ret);
783                 return ret;
784         }
785
786         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
787
788         return simple_read_from_buffer(buf, len, offset, buff, desc);
789 }
790
791 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
792                                         size_t len, loff_t *offset)
793 {
794         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
795         struct gb_svc *svc = pwrmon_rails->svc;
796         int ret, desc;
797         u32 value;
798         char buff[16];
799
800         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
801                                        GB_SVC_PWRMON_TYPE_CURR, &value);
802         if (ret) {
803                 dev_err(&svc->dev,
804                         "failed to get current sample %u: %d\n",
805                         pwrmon_rails->id, ret);
806                 return ret;
807         }
808
809         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
810
811         return simple_read_from_buffer(buf, len, offset, buff, desc);
812 }
813
814 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
815                                       size_t len, loff_t *offset)
816 {
817         struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
818         struct gb_svc *svc = pwrmon_rails->svc;
819         int ret, desc;
820         u32 value;
821         char buff[16];
822
823         ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
824                                        GB_SVC_PWRMON_TYPE_PWR, &value);
825         if (ret) {
826                 dev_err(&svc->dev, "failed to get power sample %u: %d\n",
827                         pwrmon_rails->id, ret);
828                 return ret;
829         }
830
831         desc = scnprintf(buff, sizeof(buff), "%u\n", value);
832
833         return simple_read_from_buffer(buf, len, offset, buff, desc);
834 }
835
836 static const struct file_operations pwrmon_debugfs_voltage_fops = {
837         .read           = pwr_debugfs_voltage_read,
838 };
839
840 static const struct file_operations pwrmon_debugfs_current_fops = {
841         .read           = pwr_debugfs_current_read,
842 };
843
844 static const struct file_operations pwrmon_debugfs_power_fops = {
845         .read           = pwr_debugfs_power_read,
846 };
847
/*
 * Create the "pwrmon" debugfs hierarchy for the SVC's power-monitor rails.
 *
 * Queries the SVC for the number of rails and their names, then creates one
 * directory per rail containing "voltage_now", "current_now" and "power_now"
 * read-only files.  On any failure the "pwrmon" directory is removed again;
 * power monitoring is optional, so no error is reported to the caller.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	/* Response header plus one fixed-size name slot per rail. */
	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	/* Per-rail state handed to the debugfs files as i_private. */
	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
912
/* Create the SVC's debugfs root directory and populate power monitoring. */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}
919
/* Remove the SVC debugfs tree and free the per-rail state it referenced. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
926
/*
 * Handle the SVC_HELLO request: record the endo id and AP interface id sent
 * by the SVC, register the SVC device, set up the watchdog, debugfs and
 * timesync, and queue deferred post-hello processing.
 *
 * Returns 0 on success or a negative errno; on failure after device_add()
 * the device is unregistered again.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		/*
		 * NOTE(review): this path also runs gb_svc_watchdog_destroy()
		 * below; assumes destroy is a no-op when creation failed —
		 * confirm against svc_watchdog implementation.
		 */
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	ret = gb_timesync_svc_add(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
		gb_svc_debugfs_exit(svc);
		goto err_unregister_device;
	}

	/* Remaining hello work (power-mode tweak) runs from the workqueue. */
	return gb_svc_queue_deferred_request(op);

err_unregister_device:
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}
973
974 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
975                                                         u8 intf_id)
976 {
977         struct gb_host_device *hd = svc->hd;
978         struct gb_module *module;
979         size_t num_interfaces;
980         u8 module_id;
981
982         list_for_each_entry(module, &hd->modules, hd_node) {
983                 module_id = module->module_id;
984                 num_interfaces = module->num_interfaces;
985
986                 if (intf_id >= module_id &&
987                                 intf_id < module_id + num_interfaces) {
988                         return module->interfaces[intf_id - module_id];
989                 }
990         }
991
992         return NULL;
993 }
994
995 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
996 {
997         struct gb_host_device *hd = svc->hd;
998         struct gb_module *module;
999
1000         list_for_each_entry(module, &hd->modules, hd_node) {
1001                 if (module->module_id == module_id)
1002                         return module;
1003         }
1004
1005         return NULL;
1006 }
1007
/*
 * Deferred part of the SVC hello handling; runs from the SVC workqueue
 * after gb_svc_hello() has registered the device.  A failed power-mode
 * change is only logged, never fatal.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					GB_SVC_UNIPRO_HS_SERIES_A,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS,
					GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					2, 1,
					0, 0,
					NULL, NULL);

	if (ret)
		dev_warn(&svc->dev,
			"power mode change failed on AP to switch link: %d\n",
			ret);
}
1038
/*
 * Deferred handler for module-inserted events: create a gb_module covering
 * the reported interface range, register it and add it to the host device's
 * module list.  Duplicate events for an already-known module id are ignored
 * with a warning.
 */
static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
			__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
				module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
				module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		/* Drop the creation reference; module was never listed. */
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}
1086
/*
 * Deferred handler for module-removed events: mark the module disconnected,
 * unregister it, drop it from the host device's module list and release the
 * reference taken when it was inserted.
 */
static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
				module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}
1114
/*
 * Deferred handler for interface-oops events: look up the offending
 * interface and take it down (disable then deactivate) under its mutex.
 */
static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
1145
/*
 * Deferred handler for interface mailbox events: look up the interface and
 * forward the result code and mailbox value to the interface core.
 */
static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
			__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
1173
/*
 * Workqueue handler for deferred SVC requests: dispatch on the original
 * request type, then drop the operation reference taken in
 * gb_svc_queue_deferred_request() and free the wrapper.
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	/* Balance the get in gb_svc_queue_deferred_request(). */
	gb_operation_put(operation);
	kfree(dr);
}
1209
1210 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1211 {
1212         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1213         struct gb_svc_deferred_request *dr;
1214
1215         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1216         if (!dr)
1217                 return -ENOMEM;
1218
1219         gb_operation_get(operation);
1220
1221         dr->operation = operation;
1222         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1223
1224         queue_work(svc->wq, &dr->work);
1225
1226         return 0;
1227 }
1228
/*
 * Request handler for SVC interface-reset events.
 *
 * Currently only validates the request; the actual reset is not yet
 * implemented (see FIXME below), so intf_id is parsed but unused.
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
				request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1249
1250 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1251 {
1252         struct gb_svc *svc = gb_connection_get_data(op->connection);
1253         struct gb_svc_module_inserted_request *request;
1254
1255         if (op->request->payload_size < sizeof(*request)) {
1256                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1257                                 op->request->payload_size, sizeof(*request));
1258                 return -EINVAL;
1259         }
1260
1261         request = op->request->payload;
1262
1263         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1264                         request->primary_intf_id);
1265
1266         return gb_svc_queue_deferred_request(op);
1267 }
1268
1269 static int gb_svc_module_removed_recv(struct gb_operation *op)
1270 {
1271         struct gb_svc *svc = gb_connection_get_data(op->connection);
1272         struct gb_svc_module_removed_request *request;
1273
1274         if (op->request->payload_size < sizeof(*request)) {
1275                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1276                                 op->request->payload_size, sizeof(*request));
1277                 return -EINVAL;
1278         }
1279
1280         request = op->request->payload;
1281
1282         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1283                         request->primary_intf_id);
1284
1285         return gb_svc_queue_deferred_request(op);
1286 }
1287
1288 static int gb_svc_intf_oops_recv(struct gb_operation *op)
1289 {
1290         struct gb_svc *svc = gb_connection_get_data(op->connection);
1291         struct gb_svc_intf_oops_request *request;
1292
1293         if (op->request->payload_size < sizeof(*request)) {
1294                 dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
1295                          op->request->payload_size, sizeof(*request));
1296                 return -EINVAL;
1297         }
1298
1299         return gb_svc_queue_deferred_request(op);
1300 }
1301
1302 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1303 {
1304         struct gb_svc *svc = gb_connection_get_data(op->connection);
1305         struct gb_svc_intf_mailbox_event_request *request;
1306
1307         if (op->request->payload_size < sizeof(*request)) {
1308                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1309                                 op->request->payload_size, sizeof(*request));
1310                 return -EINVAL;
1311         }
1312
1313         request = op->request->payload;
1314
1315         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1316
1317         return gb_svc_queue_deferred_request(op);
1318 }
1319
/*
 * Central request handler for the SVC connection.
 *
 * First enforces the initial request ordering (PROTOCOL_VERSION, then
 * SVC_HELLO, then anything else), advancing svc->state as each stage
 * succeeds, and then dispatches to the per-type handlers.  Returns a
 * negative errno for out-of-order or unsupported requests.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
				type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1384
/*
 * Device release callback, invoked when the last reference to the SVC
 * device is dropped: tears down the connection (if one was created), the
 * device-id ida and the workqueue, then frees the structure.
 *
 * NOTE(review): assumes svc->connection is either NULL or a valid
 * connection — an ERR_PTR stored here would wrongly pass the check and be
 * handed to gb_connection_destroy(); verify the creation error path.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}
1395
/* Device type for SVC devices; release frees all SVC resources. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1400
1401 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
1402 {
1403         struct gb_svc *svc;
1404
1405         svc = kzalloc(sizeof(*svc), GFP_KERNEL);
1406         if (!svc)
1407                 return NULL;
1408
1409         svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
1410         if (!svc->wq) {
1411                 kfree(svc);
1412                 return NULL;
1413         }
1414
1415         svc->dev.parent = &hd->dev;
1416         svc->dev.bus = &greybus_bus_type;
1417         svc->dev.type = &greybus_svc_type;
1418         svc->dev.groups = svc_groups;
1419         svc->dev.dma_mask = svc->dev.parent->dma_mask;
1420         device_initialize(&svc->dev);
1421
1422         dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
1423
1424         ida_init(&svc->device_id_map);
1425         svc->state = GB_SVC_STATE_RESET;
1426         svc->hd = hd;
1427
1428         svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
1429                                                 gb_svc_request_handler);
1430         if (IS_ERR(svc->connection)) {
1431                 dev_err(&svc->dev, "failed to create connection: %ld\n",
1432                                 PTR_ERR(svc->connection));
1433                 goto err_put_device;
1434         }
1435
1436         gb_connection_set_data(svc->connection, svc);
1437
1438         return svc;
1439
1440 err_put_device:
1441         put_device(&svc->dev);
1442         return NULL;
1443 }
1444
1445 int gb_svc_add(struct gb_svc *svc)
1446 {
1447         int ret;
1448
1449         /*
1450          * The SVC protocol is currently driven by the SVC, so the SVC device
1451          * is added from the connection request handler when enough
1452          * information has been received.
1453          */
1454         ret = gb_connection_enable(svc->connection);
1455         if (ret)
1456                 return ret;
1457
1458         return 0;
1459 }
1460
/*
 * Remove and release every module registered on the host device.  Called
 * during SVC teardown, after deferred work has been flushed.
 */
static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	/* _safe variant: each iteration unlinks the current entry. */
	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
1472
/*
 * Tear down the SVC: stop request processing, unregister the device (if it
 * was registered), drain deferred work, remove all modules and disable the
 * connection.  The structure itself is freed later, by gb_svc_put().
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop incoming requests first so no new deferred work is queued. */
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_timesync_svc_remove(svc);
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	/* Let queued deferred requests finish before removing modules. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}
1493
/* Drop a reference on the SVC device; freed via gb_svc_release(). */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}