14531edb824c7099e6395393365aebc645645d80
[cascardo/linux.git] / drivers / staging / greybus / timesync.c
1 /*
2  * TimeSync API driver.
3  *
4  * Copyright 2016 Google Inc.
5  * Copyright 2016 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9 #include <linux/debugfs.h>
10 #include <linux/hrtimer.h>
11 #include "greybus.h"
12 #include "timesync.h"
13 #include "greybus_trace.h"
14
15 /*
16  * Minimum inter-strobe value of one millisecond is chosen because it
17  * just-about fits the common definition of a jiffy.
18  *
19  * Maximum value OTOH is constrained by the number of bits the SVC can fit
20  * into a 16 bit up-counter. The SVC configures the timer in microseconds
21  * so the maximum allowable value is 65535 microseconds. We clip that value
22  * to 10000 microseconds for the sake of using nice round base 10 numbers
23  * and since right-now there's no imaginable use-case requiring anything
24  * other than a one millisecond inter-strobe time, let alone something
25  * higher than ten milliseconds.
26  */
/* SVC inter-strobe gap in microseconds; bounds discussed in comment above */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000

/* Work queue timers long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG		msecs_to_jiffies(1000)
#define GB_TIMESYNC_DELAYED_WORK_SHORT		msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC		msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE		msecs_to_jiffies(1000)
/* Maximum FrameTime=>ktime conversion window, in seconds (see comment below) */
#define GB_TIMESYNC_MAX_KTIME_CONVERSION	15

/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

/* Workqueue handler: drives the TimeSync state machine (defined below) */
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
55
/*
 * Structure to convert from FrameTime to timespec/ktime: one FrameTime
 * sample paired with the timespec captured alongside it.
 */
struct gb_timesync_frame_time_data {
	u64 frame_time;		/* Local FrameTime at the sample point */
	struct timespec ts;	/* ktime captured at the same moment */
};
61
/* Per-SVC TimeSync context: one of these per SVC on gb_timesync_svc_list */
struct gb_timesync_svc {
	struct list_head list;			/* Link on gb_timesync_svc_list */
	struct list_head interface_list;	/* gb_timesync_interface entries */
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;	/* debugfs entry */
	struct dentry *frame_ktime_dentry;	/* debugfs entry */
	struct workqueue_struct *work_queue;
	/* Woken on transitions to ACTIVE/INACTIVE/INVALID */
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;	/* Runs gb_timesync_worker() */
	struct timer_list ktime_timer;

	/* The current local FrameTime; direction lives in offset_down */
	u64 frame_time_offset;
	/* One locally captured FrameTime/ktime sample per SVC strobe */
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	/* Reference pair used by gb_timesync_to_timespec() */
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;	/* Wake pins acquired for strobing/pings */
	bool offset_down;	/* true: subtract offset from raw counter */
	bool print_ping;	/* Log FrameTimes after the next ping */
	bool capture_ping;	/* Set while an SVC ping is in flight */
	int strobe;		/* Strobe progress counter (reset on INIT) */

	/* Current state: one of enum gb_timesync_state */
	int state;
};
97
/* Per-host-device (APB/GPB) TimeSync bookkeeping */
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;	/* FrameTime reported at the last ping */
};
103
/* Per-interface TimeSync bookkeeping, linked on svc->interface_list */
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;	/* FrameTime reported at the last ping */
};
109
/*
 * TimeSync state machine. Legal transitions are enforced by
 * gb_timesync_set_state(); the delayed work acts on INIT, WAIT_SVC,
 * AUTHORITATIVE and PING (see gb_timesync_worker()).
 */
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID		= 0,
	GB_TIMESYNC_STATE_INACTIVE		= 1,	/* Torn down */
	GB_TIMESYNC_STATE_INIT			= 2,	/* Enable scheduled */
	GB_TIMESYNC_STATE_WAIT_SVC		= 3,	/* Awaiting SVC strobes */
	GB_TIMESYNC_STATE_AUTHORITATIVE		= 4,	/* Collating SVC time */
	GB_TIMESYNC_STATE_PING			= 5,	/* Ping round running */
	GB_TIMESYNC_STATE_ACTIVE		= 6,	/* Synchronized */
};

/* Forward declaration for the ktime_timer callback (defined later in file) */
static void gb_timesync_ktime_timer_fn(unsigned long data);
121
122 static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
123                                     u64 counts)
124 {
125         if (timesync_svc->offset_down)
126                 return counts - timesync_svc->frame_time_offset;
127         else
128                 return counts + timesync_svc->frame_time_offset;
129 }
130
131 /*
132  * This function provides the authoritative FrameTime to a calling function. It
133  * is designed to be lockless and should remain that way the caller is assumed
134  * to be state-aware.
135  */
136 static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
137 {
138         u64 clocks = gb_timesync_platform_get_counter();
139
140         return gb_timesync_adjust_count(timesync_svc, clocks);
141 }
142
/*
 * Arm the timeout for SVC strobe completion. If the state machine is
 * not advanced first, the delayed work fires while still in WAIT_SVC
 * and tears the session down (see gb_timesync_worker()).
 */
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}
150
/*
 * Request a state machine transition. Only the legal edges below change
 * timesync_svc->state; an illegal request leaves the state untouched
 * and trips the WARN_ON at the bottom. Serialization is typically
 * provided by the SVC spinlock via gb_timesync_set_state_atomic().
 */
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		/* Unconditional; wake anyone blocked on a state change */
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		/* Unconditional; wake anyone blocked on a state change */
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		/* Restart synchronization from any state except INVALID */
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		/* Strobing may only follow a scheduled INIT */
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		/* SVC strobing complete: run the worker immediately */
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		/* Pings are only valid while actively synchronized */
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		/* Reached after collation or a completed ping round */
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	/* state unchanged here means the requested transition was illegal */
	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
208
/* Perform a state transition under the per-SVC spinlock (ISR-safe) */
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
218
/* Absolute difference of two unsigned 64-bit values */
static u64 gb_timesync_diff(u64 x, u64 y)
{
	return (x > y) ? x - y : y - x;
}
226
227 static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
228                                       u64 svc_frame_time, u64 ap_frame_time)
229 {
230         if (svc_frame_time > ap_frame_time) {
231                 svc->frame_time_offset = svc_frame_time - ap_frame_time;
232                 svc->offset_down = false;
233         } else {
234                 svc->frame_time_offset = ap_frame_time - svc_frame_time;
235                 svc->offset_down = true;
236         }
237 }
238
/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec
 * Requires the calling context to hold timesync_svc->mutex
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	/* This pair becomes the reference for gb_timesync_to_timespec() */
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}
249
/*
 * Find the two pulses that best-match our expected inter-strobe gap and
 * then calculate the difference between the SVC time at the second pulse
 * to the local time at the second pulse.
 *
 * frame_time[] carries the SVC-reported FrameTime per strobe while
 * strobe_data[] holds the locally captured FrameTime/ktime samples.
 * The adjacent pair whose local spacing deviates least from the
 * programmed strobe delay wins: the AP offset is adjusted to the SVC
 * value at that strobe and the matching local ktime sample becomes the
 * FrameTime=>ktime reference.
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i = 0;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		/* Local elapsed time between consecutive strobes, in ns */
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		/* Deviation from the expected inter-strobe gap */
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		/* least == 0 doubles as "no candidate adopted yet" */
		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			/* Store the adjusted local time as ktime reference */
			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
288
/*
 * Fully unwind a TimeSync session: disable TimeSync on every interface
 * and the host device, release the SVC wake pins, disable the SVC side
 * and drop the bus lock, then mark the state INACTIVE. Individual
 * disable failures are logged but do not abort the teardown.
 */
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	/* Disable TimeSync on each interface attached to this SVC */
	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	/* Disable TimeSync on the host device (APB/GPB) */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	/* Release wake pins, disable the SVC and unlock the bus */
	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
320
321 static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
322                                                 *timesync_svc, int ret)
323 {
324         if (ret == -EAGAIN) {
325                 gb_timesync_set_state(timesync_svc, timesync_svc->state);
326         } else {
327                 pr_err("Failed to lock timesync bus %d\n", ret);
328                 gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
329         }
330 }
331
/*
 * Start a synchronization round. Locks the timesync bus, acquires the
 * wake pins, then sends a TimeSync enable - carrying a shared initial
 * FrameTime in the near future - to every interface, the host device
 * and finally the SVC. On SVC failure the session is torn down;
 * otherwise a strobe-completion timeout is armed.
 */
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative()
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	/* Enter WAIT_SVC before the SVC can start strobing */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
403
/*
 * Fetch the SVC's per-strobe FrameTimes, collate them against the
 * locally captured strobes to fix the AP offset, then propagate the
 * authoritative times downstream to the host device and interfaces.
 * Finishes by releasing the wake pins/bus lock and scheduling a
 * verification ping (ACTIVE => PING).
 */
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(
						interface,
						svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
452
453 static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
454 {
455         int ret = -EINVAL;
456
457         switch (timesync_svc->state) {
458         case GB_TIMESYNC_STATE_INVALID:
459         case GB_TIMESYNC_STATE_INACTIVE:
460                 ret = -ENODEV;
461                 break;
462         case GB_TIMESYNC_STATE_INIT:
463         case GB_TIMESYNC_STATE_WAIT_SVC:
464         case GB_TIMESYNC_STATE_AUTHORITATIVE:
465         case GB_TIMESYNC_STATE_PING:
466                 ret = -EAGAIN;
467                 break;
468         case GB_TIMESYNC_STATE_ACTIVE:
469                 ret = 0;
470                 break;
471         }
472         return ret;
473 }
474
475 /*
476  * This routine takes a FrameTime and derives the difference with-respect
477  * to a reference FrameTime/ktime pair. It then returns the calculated
478  * ktime based on the difference between the supplied FrameTime and
479  * the reference FrameTime.
480  *
481  * The time difference is calculated to six decimal places. Taking 19.2MHz
482  * as an example this means we have 52.083333~ nanoseconds per clock or
483  * 52083333~ femtoseconds per clock.
484  *
485  * Naively taking the count difference and converting to
486  * seconds/nanoseconds would quickly see the 0.0833 component produce
487  * noticeable errors. For example a time difference of one second would
488  * loose 19200000 * 0.08333x nanoseconds or 1.59 seconds.
489  *
490  * In contrast calculating in femtoseconds the same example of 19200000 *
491  * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
492  *
493  * Continuing the example of 19.2 MHz we cap the maximum error difference
494  * at a worst-case 0.3 microseconds over a potential calculation window of
495  * abount 15 seconds, meaning you can convert a FrameTime that is <= 15
496  * seconds older/younger than the reference time with a maximum error of
497  * 0.2385 useconds. Note 19.2MHz is an example frequency not a requirement.
498  */
499 static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
500                                    u64 frame_time, struct timespec *ts)
501 {
502         unsigned long flags;
503         u64 delta_fs, counts, sec, nsec;
504         bool add;
505         int ret = 0;
506
507         memset(ts, 0x00, sizeof(*ts));
508         mutex_lock(&timesync_svc->mutex);
509         spin_lock_irqsave(&timesync_svc->spinlock, flags);
510
511         ret = __gb_timesync_get_status(timesync_svc);
512         if (ret)
513                 goto done;
514
515         /* Support calculating ktime upwards or downwards from the reference */
516         if (frame_time < timesync_svc->ktime_data.frame_time) {
517                 add = false;
518                 counts = timesync_svc->ktime_data.frame_time - frame_time;
519         } else {
520                 add = true;
521                 counts = frame_time - timesync_svc->ktime_data.frame_time;
522         }
523
524         /* Enforce the .23 of a usecond boundary @ 19.2MHz */
525         if (counts > gb_timesync_max_ktime_diff) {
526                 ret = -EINVAL;
527                 goto done;
528         }
529
530         /* Determine the time difference in femtoseconds */
531         delta_fs = counts * gb_timesync_fs_per_clock;
532
533         /* Convert to seconds */
534         sec = delta_fs;
535         do_div(sec, NSEC_PER_SEC);
536         do_div(sec, 1000000UL);
537
538         /* Get the nanosecond remainder */
539         nsec = do_div(delta_fs, sec);
540         do_div(nsec, 1000000UL);
541
542         if (add) {
543                 /* Add the calculated offset - overflow nanoseconds upwards */
544                 ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
545                 ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
546                 if (ts->tv_nsec >= NSEC_PER_SEC) {
547                         ts->tv_sec++;
548                         ts->tv_nsec -= NSEC_PER_SEC;
549                 }
550         } else {
551                 /* Subtract the difference over/underflow as necessary */
552                 if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
553                         sec++;
554                         nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
555                         nsec = do_div(nsec, NSEC_PER_SEC);
556                 } else {
557                         nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
558                 }
559                 /* Cannot return a negative second value */
560                 if (sec > timesync_svc->ktime_data.ts.tv_sec) {
561                         ret = -EINVAL;
562                         goto done;
563                 }
564                 ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
565                 ts->tv_nsec = nsec;
566         }
567 done:
568         spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
569         mutex_unlock(&timesync_svc->mutex);
570         return ret;
571 }
572
/*
 * Render one line of per-participant FrameTimes (captured at the last
 * ping) into buf: AP, SVC, host device, then each interface. Returns
 * the cumulative snprintf() count, which on truncation can exceed the
 * bytes actually stored in buf.
 */
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	/* buflen - off wraps huge once off passes buflen => guard fails */
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
612
/*
 * Render one line of per-participant ping times converted to ktime
 * (via gb_timesync_to_timespec()) into buf. Returns the cumulative
 * snprintf() count; bails out early once the buffer is exhausted.
 *
 * NOTE(review): "%lu.%lu" does not zero-pad tv_nsec, so fractional
 * seconds lose leading zeros (e.g. 1.000000500 prints as "1.500");
 * tv_sec/tv_nsec are signed longs printed with %lu - values are
 * non-negative here but the specifier is technically mismatched.
 */
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	/* buflen - off wraps huge once off passes buflen => bail out */
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* SVC */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	gb_timesync_to_timespec(timesync_svc,
				timesync_svc->timesync_hd->ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ",
			dev_name(&hd->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		gb_timesync_to_timespec(timesync_svc,
					timesync_interface->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ",
				dev_name(&interface->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
		if (len >= buflen)
			goto done;
	}
	off += snprintf(&buf[off], len, "\n");
done:
	return off;
}
671
/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake
 * ping.
 *
 * Any failure on the SVC side (pin acquisition or the ping itself)
 * tears the session down; per-participant readback failures are only
 * logged. On success the state machine returns to ACTIVE.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Have SVC generate a timesync ping */
	timesync_svc->capture_ping = true;	/* flag the ping window */
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	/* Get the ping FrameTime from each interface */
	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
738
739 static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
740 {
741         char *buf;
742
743         if (!timesync_svc->print_ping)
744                 return;
745
746         buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
747         if (buf) {
748                 gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
749                 pr_info("%s", buf);
750                 kfree(buf);
751         }
752 }
753
/*
 * Perform the actual work of scheduled TimeSync logic. Runs with
 * timesync_svc->mutex held and dispatches on the current state:
 * INIT => start a sync round, WAIT_SVC => SVC strobe timeout (tear
 * down), AUTHORITATIVE => collate and propagate the SVC time,
 * PING => run a ping round and optionally log it.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		/* The SVC never finished strobing within MAX_WAIT_SVC */
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion\n");
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}
793
794 /*
795  * Schedule a new TimeSync INIT or PING operation serialized w/r to
796  * gb_timesync_worker().
797  */
798 static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
799 {
800         int ret = 0;
801
802         if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
803                 return -EINVAL;
804
805         mutex_lock(&timesync_svc->mutex);
806         if (timesync_svc->state ==  GB_TIMESYNC_STATE_INACTIVE ||
807             timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
808                 gb_timesync_set_state_atomic(timesync_svc, state);
809         } else {
810                 ret = -ENODEV;
811         }
812         mutex_unlock(&timesync_svc->mutex);
813         return ret;
814 }
815
816 static int __gb_timesync_schedule_synchronous(
817         struct gb_timesync_svc *timesync_svc, int state)
818 {
819         unsigned long flags;
820         int ret;
821
822         ret = gb_timesync_schedule(timesync_svc, state);
823         if (ret)
824                 return ret;
825
826         ret = wait_event_interruptible(timesync_svc->wait_queue,
827                         (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
828                          timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
829                          timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
830         if (ret)
831                 return ret;
832
833         mutex_lock(&timesync_svc->mutex);
834         spin_lock_irqsave(&timesync_svc->spinlock, flags);
835
836         ret = __gb_timesync_get_status(timesync_svc);
837
838         spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
839         mutex_unlock(&timesync_svc->mutex);
840
841         return ret;
842 }
843
844 static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
845         struct gb_host_device *hd)
846 {
847         struct gb_timesync_svc *timesync_svc;
848
849         list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
850                 if (timesync_svc->svc == hd->svc)
851                         return timesync_svc;
852         }
853         return NULL;
854 }
855
856 static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
857         struct gb_timesync_svc *timesync_svc,
858         struct gb_interface *interface)
859 {
860         struct gb_timesync_interface *timesync_interface;
861
862         list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
863                 if (timesync_interface->interface == interface)
864                         return timesync_interface;
865         }
866         return NULL;
867 }
868
869 int gb_timesync_schedule_synchronous(struct gb_interface *interface)
870 {
871         int ret;
872         struct gb_timesync_svc *timesync_svc;
873
874         if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
875                 return 0;
876
877         mutex_lock(&gb_timesync_svc_list_mutex);
878         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
879         if (!timesync_svc) {
880                 ret = -ENODEV;
881                 goto done;
882         }
883
884         ret = __gb_timesync_schedule_synchronous(timesync_svc,
885                                                  GB_TIMESYNC_STATE_INIT);
886 done:
887         mutex_unlock(&gb_timesync_svc_list_mutex);
888         return ret;
889 }
890 EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
891
892 void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
893 {
894         struct gb_timesync_svc *timesync_svc;
895
896         if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
897                 return;
898
899         mutex_lock(&gb_timesync_svc_list_mutex);
900         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
901         if (!timesync_svc)
902                 goto done;
903
904         gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
905 done:
906         mutex_unlock(&gb_timesync_svc_list_mutex);
907         return;
908 }
909 EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
910
/*
 * debugfs read handler: run a synchronous TimeSync PING and return the
 * resulting timestamps to userspace, either as raw FrameTime or as ktime
 * depending on @ktime.
 */
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
				     size_t len, loff_t *offset, bool ktime)
{
	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
	char *buf;
	ssize_t ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	mutex_lock(&timesync_svc->mutex);
	/* A ping is meaningless with no interfaces attached */
	if (list_empty(&timesync_svc->interface_list))
		ret = -ENODEV;
	/* This path reports to userspace, not to the kernel log */
	timesync_svc->print_ping = false;
	mutex_unlock(&timesync_svc->mutex);
	if (ret)
		goto done;

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_PING);
	if (ret)
		goto done;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto done;
	}

	if (ktime)
		ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
	else
		ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
	kfree(buf);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
949
/* debugfs "frame-time" read: report ping results as raw FrameTime. */
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}
956
/* debugfs "frame-ktime" read: report ping results converted to ktime. */
static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}
963
/* File operations for the per-SVC debugfs "frame-time" entry */
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};
967
/* File operations for the per-SVC debugfs "frame-ktime" entry */
static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};
971
972 static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
973                               struct gb_host_device *hd)
974 {
975         struct gb_timesync_host_device *timesync_hd;
976
977         timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
978         if (!timesync_hd)
979                 return -ENOMEM;
980
981         WARN_ON(timesync_svc->timesync_hd);
982         timesync_hd->hd = hd;
983         timesync_svc->timesync_hd = timesync_hd;
984
985         return 0;
986 }
987
988 static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
989                                   struct gb_host_device *hd)
990 {
991         if (timesync_svc->timesync_hd->hd == hd) {
992                 kfree(timesync_svc->timesync_hd);
993                 timesync_svc->timesync_hd = NULL;
994                 return;
995         }
996         WARN_ON(1);
997 }
998
/*
 * Allocate and register per-SVC TimeSync state: a dedicated workqueue,
 * debugfs reporting entries, per-host-device state and the periodic
 * FrameTime-to-ktime conversion timer.
 */
int gb_timesync_svc_add(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	int ret;

	timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
	if (!timesync_svc)
		return -ENOMEM;

	/* Single-threaded queue serializes all TimeSync state transitions */
	timesync_svc->work_queue =
		create_singlethread_workqueue("gb-timesync-work_queue");

	if (!timesync_svc->work_queue) {
		kfree(timesync_svc);
		return -ENOMEM;
	}

	mutex_lock(&gb_timesync_svc_list_mutex);
	INIT_LIST_HEAD(&timesync_svc->interface_list);
	INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
	mutex_init(&timesync_svc->mutex);
	spin_lock_init(&timesync_svc->spinlock);
	init_waitqueue_head(&timesync_svc->wait_queue);

	timesync_svc->svc = svc;
	timesync_svc->frame_time_offset = 0;
	timesync_svc->capture_ping = false;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

	/* Expose ping-driven FrameTime/ktime reports via debugfs */
	timesync_svc->frame_time_dentry =
		debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_time_ops);
	timesync_svc->frame_ktime_dentry =
		debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_ktime_ops);

	list_add(&timesync_svc->list, &gb_timesync_svc_list);
	ret = gb_timesync_hd_add(timesync_svc, svc->hd);
	if (ret) {
		/* Unwind everything registered above, in reverse order */
		list_del(&timesync_svc->list);
		debugfs_remove(timesync_svc->frame_ktime_dentry);
		debugfs_remove(timesync_svc->frame_time_dentry);
		destroy_workqueue(timesync_svc->work_queue);
		kfree(timesync_svc);
		goto done;
	}

	/* Periodically refresh the FrameTime-to-ktime conversion baseline */
	init_timer(&timesync_svc->ktime_timer);
	timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
	timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
	timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
	add_timer(&timesync_svc->ktime_timer);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
1058
/*
 * Tear down and free all per-SVC TimeSync state registered by
 * gb_timesync_svc_add(); a no-op if the SVC was never registered.
 */
void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	/* Stop any pending state-machine work before tearing down */
	cancel_delayed_work_sync(&timesync_svc->delayed_work);

	mutex_lock(&timesync_svc->mutex);

	/* INVALID blocks new operations; then quiesce the ktime timer */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	del_timer_sync(&timesync_svc->ktime_timer);
	gb_timesync_teardown(timesync_svc);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	/* Release any interfaces still attached to this SVC */
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);

	mutex_unlock(&timesync_svc->mutex);

	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
1096
1097 /*
1098  * Add a Greybus Interface to the set of TimeSync Interfaces.
1099  */
1100 int gb_timesync_interface_add(struct gb_interface *interface)
1101 {
1102         struct gb_timesync_svc *timesync_svc;
1103         struct gb_timesync_interface *timesync_interface;
1104         int ret = 0;
1105
1106         if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
1107                 return 0;
1108
1109         mutex_lock(&gb_timesync_svc_list_mutex);
1110         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1111         if (!timesync_svc) {
1112                 ret = -ENODEV;
1113                 goto done;
1114         }
1115
1116         timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
1117         if (!timesync_interface) {
1118                 ret = -ENOMEM;
1119                 goto done;
1120         }
1121
1122         mutex_lock(&timesync_svc->mutex);
1123         timesync_interface->interface = interface;
1124         list_add(&timesync_interface->list, &timesync_svc->interface_list);
1125         timesync_svc->strobe_mask |= 1 << interface->interface_id;
1126         mutex_unlock(&timesync_svc->mutex);
1127
1128 done:
1129         mutex_unlock(&gb_timesync_svc_list_mutex);
1130         return ret;
1131 }
1132 EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
1133
1134 /*
1135  * Remove a Greybus Interface from the set of TimeSync Interfaces.
1136  */
1137 void gb_timesync_interface_remove(struct gb_interface *interface)
1138 {
1139         struct gb_timesync_svc *timesync_svc;
1140         struct gb_timesync_interface *timesync_interface;
1141
1142         if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
1143                 return;
1144
1145         mutex_lock(&gb_timesync_svc_list_mutex);
1146         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1147         if (!timesync_svc)
1148                 goto done;
1149
1150         timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
1151                                                                  interface);
1152         if (!timesync_interface)
1153                 goto done;
1154
1155         mutex_lock(&timesync_svc->mutex);
1156         timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
1157         list_del(&timesync_interface->list);
1158         kfree(timesync_interface);
1159         mutex_unlock(&timesync_svc->mutex);
1160 done:
1161         mutex_unlock(&gb_timesync_svc_list_mutex);
1162 }
1163 EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
1164
1165 /*
1166  * Give the authoritative FrameTime to the calling function. Returns zero if we
1167  * are not in GB_TIMESYNC_STATE_ACTIVE.
1168  */
1169 static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
1170 {
1171         unsigned long flags;
1172         u64 ret;
1173
1174         spin_lock_irqsave(&timesync_svc->spinlock, flags);
1175         if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
1176                 ret = __gb_timesync_get_frame_time(timesync_svc);
1177         else
1178                 ret = 0;
1179         spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
1180         return ret;
1181 }
1182
1183 u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
1184 {
1185         struct gb_timesync_svc *timesync_svc;
1186         u64 ret = 0;
1187
1188         mutex_lock(&gb_timesync_svc_list_mutex);
1189         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1190         if (!timesync_svc)
1191                 goto done;
1192
1193         ret = gb_timesync_get_frame_time(timesync_svc);
1194 done:
1195         mutex_unlock(&gb_timesync_svc_list_mutex);
1196         return ret;
1197 }
1198 EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
1199
1200 u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
1201 {
1202         struct gb_timesync_svc *timesync_svc;
1203         u64 ret = 0;
1204
1205         mutex_lock(&gb_timesync_svc_list_mutex);
1206         timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
1207         if (!timesync_svc)
1208                 goto done;
1209
1210         ret = gb_timesync_get_frame_time(timesync_svc);
1211 done:
1212         mutex_unlock(&gb_timesync_svc_list_mutex);
1213         return ret;
1214 }
1215 EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
1216
/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
	struct gb_timesync_svc *timesync_svc =
		(struct gb_timesync_svc *)data;
	unsigned long flags;
	u64 frame_time;
	struct timespec ts;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	/* Only refresh the baseline while TimeSync is fully synchronized */
	if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
		goto done;

	/* Capture a matched (ktime, FrameTime) pair as the new baseline */
	ktime_get_ts(&ts);
	frame_time = __gb_timesync_get_frame_time(timesync_svc);
	gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	/* Re-arm unconditionally; the state is re-checked on each expiry */
	mod_timer(&timesync_svc->ktime_timer,
		  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
1240
1241 int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
1242                                    struct timespec *ts)
1243 {
1244         struct gb_timesync_svc *timesync_svc;
1245         int ret = 0;
1246
1247         mutex_lock(&gb_timesync_svc_list_mutex);
1248         timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
1249         if (!timesync_svc) {
1250                 ret = -ENODEV;
1251                 goto done;
1252         }
1253         ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
1254 done:
1255         mutex_unlock(&gb_timesync_svc_list_mutex);
1256         return ret;
1257 }
1258 EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
1259
1260 int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
1261                                          u64 frame_time, struct timespec *ts)
1262 {
1263         struct gb_timesync_svc *timesync_svc;
1264         int ret = 0;
1265
1266         mutex_lock(&gb_timesync_svc_list_mutex);
1267         timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1268         if (!timesync_svc) {
1269                 ret = -ENODEV;
1270                 goto done;
1271         }
1272
1273         ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
1274 done:
1275         mutex_unlock(&gb_timesync_svc_list_mutex);
1276         return ret;
1277 }
1278 EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
1279
/*
 * Timestamp an incoming TimeSync strobe or ping pulse. Exported for
 * host-driver use from strobe-event context.
 */
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	/* Capture both timestamps as early as possible, before locking */
	ktime_get_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		/* Strobe arrived outside an active synchronization cycle */
		goto done_nolog;
	}

	/* Record one (FrameTime, ktime) sample per expected strobe */
	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	/* All strobes captured: move on to the authoritative-time phase */
	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
1316
1317 int __init gb_timesync_init(void)
1318 {
1319         int ret = 0;
1320
1321         ret = gb_timesync_platform_init();
1322         if (ret) {
1323                 pr_err("timesync platform init fail!\n");
1324                 return ret;
1325         }
1326
1327         gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();
1328
1329         /* Calculate nanoseconds and femtoseconds per clock */
1330         gb_timesync_fs_per_clock = FSEC_PER_SEC;
1331         do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
1332         gb_timesync_ns_per_clock = NSEC_PER_SEC;
1333         do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
1334
1335         /* Calculate the maximum number of clocks we will convert to ktime */
1336         gb_timesync_max_ktime_diff =
1337                 GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;
1338
1339         pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
1340                 gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
1341         return 0;
1342 }
1343
/* Module exit: release the platform TimeSync resources. */
void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}