 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus.h"
#include "greybus_trace.h"
/*
 * Minimum inter-strobe value of one millisecond is chosen because it
 * just about fits the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the number of bits
 * the SVC can fit into a 16 bit up-counter. The SVC configures the timer in
 * microseconds, so the maximum allowable value is 65535 microseconds. We clip
 * that value to 10000 microseconds for the sake of using nice round base 10
 * numbers, and since right now there's no imaginable use-case requiring
 * anything other than a one millisecond inter-strobe time, let alone something
 * higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000
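/*
 * Worked example of the constraint described above (illustrative, not from
 * the original driver): a 16 bit up-counter ticking in microseconds tops out
 * at 2^16 - 1 = 65535 us, and the driver further clips its configuration
 * space to the [1000, 10000] us range. A build-time sanity check along these
 * lines could express that:
 *
 *	BUILD_BUG_ON(GB_TIMESYNC_STROBE_DELAY_US < 1000);
 *	BUILD_BUG_ON(GB_TIMESYNC_STROBE_DELAY_US > 10000);
 */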
/* Work queue timers: long, short and the SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG		msecs_to_jiffies(1000)
#define GB_TIMESYNC_DELAYED_WORK_SHORT		msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC		msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE		msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION	15
/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;
	struct timespec ts;
};
struct gb_timesync_svc {
	struct list_head list;
	struct list_head interface_list;
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings used while synchronization is in progress */
	u32 strobe_mask;
	bool offset_down;
	bool print_ping;
	bool capture_ping;
	int strobe;

	/* Current state of this SVC's TimeSync state machine */
	int state;
};
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;

	u64 ping_frame_time;
};
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;

	u64 ping_frame_time;
};
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID	= 0,
	GB_TIMESYNC_STATE_INACTIVE	= 1,
	GB_TIMESYNC_STATE_INIT		= 2,
	GB_TIMESYNC_STATE_WAIT_SVC	= 3,
	GB_TIMESYNC_STATE_AUTHORITATIVE	= 4,
	GB_TIMESYNC_STATE_PING		= 5,
	GB_TIMESYNC_STATE_ACTIVE	= 6,
};
static void gb_timesync_ktime_timer_fn(unsigned long data);
static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
				    u64 counts)
{
	if (timesync_svc->offset_down)
		return counts - timesync_svc->frame_time_offset;
	else
		return counts + timesync_svc->frame_time_offset;
}
/*
 * This function provides the authoritative FrameTime to a calling function. It
 * is designed to be lockless and should remain that way; the caller is assumed
 * to be state-aware.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	u64 clocks = gb_timesync_platform_get_counter();

	return gb_timesync_adjust_count(timesync_svc, clocks);
}
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
static u64 gb_timesync_diff(u64 x, u64 y)
{
	if (x > y)
		return x - y;
	else
		return y - x;
}
static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
				      u64 svc_frame_time, u64 ap_frame_time)
{
	if (svc_frame_time > ap_frame_time) {
		svc->frame_time_offset = svc_frame_time - ap_frame_time;
		svc->offset_down = false;
	} else {
		svc->frame_time_offset = ap_frame_time - svc_frame_time;
		svc->offset_down = true;
	}
}
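/*
 * Worked example of the offset bookkeeping above (illustrative numbers, not
 * from the original driver): if the SVC reports FrameTime 1000500 for a
 * strobe the AP captured locally as count 1000000, then frame_time_offset
 * becomes 500 with offset_down == false, and gb_timesync_adjust_count()
 * subsequently reports every local count as count + 500, so the local
 * FrameTime tracks the SVC FrameTime.
 */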
/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec.
 * Requires the calling context to hold timesync_svc->mutex.
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}
/*
 * Find the two pulses that best match our expected inter-strobe gap and
 * then calculate the difference between the SVC time and the local time
 * at the second of those two pulses.
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
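/*
 * Worked example of the best-match selection above, assuming a 19.2MHz
 * counter (illustrative; 19.2MHz is an example, not a requirement): the
 * expected 1000 us inter-strobe gap is 1,000,000 ns, and
 * gb_timesync_ns_per_clock truncates to 52 ns. A strobe pair measured
 * 19230 clocks apart scores |19230 * 52 - 1,000,000| = 40 ns, while a pair
 * measured 19260 clocks apart scores 1,520 ns, so the first pair wins and
 * its second pulse becomes the reference for the FrameTime offset.
 */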
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
					       *timesync_svc, int ret)
{
	if (ret == -EAGAIN) {
		gb_timesync_set_state(timesync_svc, timesync_svc->state);
	} else {
		pr_err("Failed to lock timesync bus %d\n", ret);
		gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
	}
}
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC.
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative().
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(interface,
							  svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
	int ret = -EINVAL;

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INVALID:
	case GB_TIMESYNC_STATE_INACTIVE:
		ret = -ENODEV;
		break;
	case GB_TIMESYNC_STATE_INIT:
	case GB_TIMESYNC_STATE_WAIT_SVC:
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
	case GB_TIMESYNC_STATE_PING:
		ret = -EAGAIN;
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		ret = 0;
		break;
	}
	return ret;
}
/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2MHz
 * as an example this means we have 52.083333~ nanoseconds per clock or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the 0.0833 component produce
 * noticeable errors. For example a time difference of one second would
 * lose 19200000 * 0.08333x nanoseconds, or about 1.6 milliseconds.
 *
 * In contrast, calculating in femtoseconds, the same example of 19200000 *
 * 0.000000083333x nanoseconds per count of error is just 1.6 nanoseconds!
 *
 * Continuing the example of 19.2MHz, we cap the maximum error difference
 * at a worst-case 0.3 microseconds over a potential calculation window of
 * about 15 seconds, meaning you can convert a FrameTime that is <= 15
 * seconds older/younger than the reference time with a maximum error of
 * 0.2385 microseconds. Note 19.2MHz is an example frequency, not a
 * requirement.
 */
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
				   u64 frame_time, struct timespec *ts)
{
	unsigned long flags;
	u64 delta_fs, counts, sec, nsec;
	bool add;
	int ret = 0;

	memset(ts, 0x00, sizeof(*ts));
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);
	if (ret)
		goto done;

	/* Support calculating ktime upwards or downwards from the reference */
	if (frame_time < timesync_svc->ktime_data.frame_time) {
		add = false;
		counts = timesync_svc->ktime_data.frame_time - frame_time;
	} else {
		add = true;
		counts = frame_time - timesync_svc->ktime_data.frame_time;
	}

	/* Enforce the ~0.23 microsecond error boundary @ 19.2MHz, see above */
	if (counts > gb_timesync_max_ktime_diff) {
		ret = -EINVAL;
		goto done;
	}

	/* Determine the time difference in femtoseconds */
	delta_fs = counts * gb_timesync_fs_per_clock;

	/* Convert the femtosecond difference to whole nanoseconds */
	nsec = delta_fs;
	do_div(nsec, 1000000UL);

	/* Split into whole seconds and the nanosecond remainder */
	sec = nsec;
	nsec = do_div(sec, NSEC_PER_SEC);

	if (add) {
		/* Add the calculated offset - overflow nanoseconds upwards */
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
		ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
		if (ts->tv_nsec >= NSEC_PER_SEC) {
			ts->tv_sec++;
			ts->tv_nsec -= NSEC_PER_SEC;
		}
	} else {
		/* Subtract the difference, borrowing a second if necessary */
		if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
			sec++;
			nsec = NSEC_PER_SEC +
				timesync_svc->ktime_data.ts.tv_nsec - nsec;
		} else {
			nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
		}
		/* Cannot return a negative second value */
		if (sec > timesync_svc->ktime_data.ts.tv_sec) {
			ret = -EINVAL;
			goto done;
		}
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
		ts->tv_nsec = nsec;
	}
done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
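/*
 * Worked example of the conversion above, assuming a 19.2MHz clock
 * (illustrative numbers, not from the original driver): for a difference of
 * counts = 28,800,000 clocks (1.5 seconds), delta_fs = 28,800,000 *
 * 52,083,333 fs = 1,499,999,990,400,000 fs. Dividing by 1,000,000 gives
 * 1,499,999,990 ns, which splits on NSEC_PER_SEC into sec = 1 and
 * nsec = 499,999,990, i.e. only 10 ns short of the ideal 1.5 s.
 */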
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
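/*
 * For illustration, a line produced by the routine above might look like
 * (hypothetical device names and FrameTime values):
 *
 *	greybus frametime: ap=1000000 1-svc=1000002 greybus1=1000001 1-2=1000003
 *
 * i.e. one "name=FrameTime" pair for the AP plus each TimeSync participant,
 * all captured at the last ping.
 */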
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%09lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	len = buflen - off;

	/* SVC */
	if (len < buflen) {
		gb_timesync_to_timespec(timesync_svc,
					timesync_svc->svc_ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%09lu ",
				dev_name(&svc->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
	}

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		gb_timesync_to_timespec(timesync_svc,
					timesync_svc->timesync_hd->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%09lu ",
				dev_name(&hd->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			gb_timesync_to_timespec(timesync_svc,
						timesync_interface->ping_frame_time,
						&ts);
			off += snprintf(&buf[off], len, "%s=%lu.%09lu ",
					dev_name(&interface->dev),
					ts.tv_sec, ts.tv_nsec);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake
 * ping.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Have SVC generate a timesync ping */
	timesync_svc->capture_ping = true;
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
	char *buf;

	if (!timesync_svc->print_ping)
		return;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
		dev_dbg(&timesync_svc->svc->dev, "%s", buf);
		kfree(buf);
	}
}
/*
 * Perform the actual work of scheduled TimeSync logic.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		dev_err(&timesync_svc->svc->dev,
			"timeout waiting for SVC strobe completion\n");
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}
/*
 * Schedule a new TimeSync INIT or PING operation, serialized with respect to
 * gb_timesync_worker().
 */
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
	int ret = 0;

	if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
		return -EINVAL;

	mutex_lock(&timesync_svc->mutex);
	if (timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
	    timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
		gb_timesync_set_state_atomic(timesync_svc, state);
	} else {
		ret = -ENODEV;
	}
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
			(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}
static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
	struct gb_host_device *hd)
{
	struct gb_timesync_svc *timesync_svc;

	list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
		if (timesync_svc->svc == hd->svc)
			return timesync_svc;
	}
	return NULL;
}

static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
	struct gb_timesync_svc *timesync_svc,
	struct gb_interface *interface)
{
	struct gb_timesync_interface *timesync_interface;

	list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
		if (timesync_interface->interface == interface)
			return timesync_interface;
	}
	return NULL;
}
int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
	int ret;
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
				     size_t len, loff_t *offset, bool ktime)
{
	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
	char *buf;
	ssize_t ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	mutex_lock(&timesync_svc->mutex);
	if (list_empty(&timesync_svc->interface_list))
		ret = -ENODEV;
	timesync_svc->print_ping = false;
	mutex_unlock(&timesync_svc->mutex);
	if (ret)
		goto done;

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_PING);
	if (ret)
		goto done;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto done;
	}

	if (ktime)
		ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
	else
		ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
	kfree(buf);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}

static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}

static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};

static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};
static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
			      struct gb_host_device *hd)
{
	struct gb_timesync_host_device *timesync_hd;

	timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
	if (!timesync_hd)
		return -ENOMEM;

	WARN_ON(timesync_svc->timesync_hd);
	timesync_hd->hd = hd;
	timesync_svc->timesync_hd = timesync_hd;

	return 0;
}

static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
				  struct gb_host_device *hd)
{
	if (timesync_svc->timesync_hd->hd == hd) {
		kfree(timesync_svc->timesync_hd);
		timesync_svc->timesync_hd = NULL;
		return;
	}
	WARN_ON(1);
}
int gb_timesync_svc_add(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	int ret;

	timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
	if (!timesync_svc)
		return -ENOMEM;

	timesync_svc->work_queue =
		create_singlethread_workqueue("gb-timesync-work_queue");
	if (!timesync_svc->work_queue) {
		kfree(timesync_svc);
		return -ENOMEM;
	}

	mutex_lock(&gb_timesync_svc_list_mutex);
	INIT_LIST_HEAD(&timesync_svc->interface_list);
	INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
	mutex_init(&timesync_svc->mutex);
	spin_lock_init(&timesync_svc->spinlock);
	init_waitqueue_head(&timesync_svc->wait_queue);

	timesync_svc->svc = svc;
	timesync_svc->frame_time_offset = 0;
	timesync_svc->capture_ping = false;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

	timesync_svc->frame_time_dentry =
		debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_time_ops);
	timesync_svc->frame_ktime_dentry =
		debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_ktime_ops);

	list_add(&timesync_svc->list, &gb_timesync_svc_list);
	ret = gb_timesync_hd_add(timesync_svc, svc->hd);
	if (ret) {
		list_del(&timesync_svc->list);
		debugfs_remove(timesync_svc->frame_ktime_dentry);
		debugfs_remove(timesync_svc->frame_time_dentry);
		destroy_workqueue(timesync_svc->work_queue);
		kfree(timesync_svc);
		goto done;
	}

	init_timer(&timesync_svc->ktime_timer);
	timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
	timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
	timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
	add_timer(&timesync_svc->ktime_timer);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	cancel_delayed_work_sync(&timesync_svc->delayed_work);

	mutex_lock(&timesync_svc->mutex);

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	del_timer_sync(&timesync_svc->ktime_timer);
	gb_timesync_teardown(timesync_svc);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);

	mutex_unlock(&timesync_svc->mutex);

	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
/*
 * Add a Greybus Interface to the set of TimeSync Interfaces.
 */
int gb_timesync_interface_add(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	int ret = 0;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
	if (!timesync_interface) {
		ret = -ENOMEM;
		goto done;
	}

	mutex_lock(&timesync_svc->mutex);
	timesync_interface->interface = interface;
	list_add(&timesync_interface->list, &timesync_svc->interface_list);
	timesync_svc->strobe_mask |= 1 << interface->interface_id;
	mutex_unlock(&timesync_svc->mutex);

done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
/*
 * Remove a Greybus Interface from the set of TimeSync Interfaces.
 */
void gb_timesync_interface_remove(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
								 interface);
	if (!timesync_interface)
		goto done;

	mutex_lock(&timesync_svc->mutex);
	timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
	list_del(&timesync_interface->list);
	kfree(timesync_interface);
	mutex_unlock(&timesync_svc->mutex);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
/*
 * Give the authoritative FrameTime to the calling function. Returns zero if we
 * are not in GB_TIMESYNC_STATE_ACTIVE.
 */
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
		ret = __gb_timesync_get_frame_time(timesync_svc);
	else
		ret = 0;
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	return ret;
}
u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);

u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
/* Incrementally update the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
	struct gb_timesync_svc *timesync_svc =
		(struct gb_timesync_svc *)data;
	unsigned long flags;
	u64 frame_time;
	struct timespec ts;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
		goto done;

	ktime_get_ts(&ts);
	frame_time = __gb_timesync_get_frame_time(timesync_svc);
	gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mod_timer(&timesync_svc->ktime_timer,
		  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
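/*
 * A note on the refresh period above (reasoning, not from the original
 * driver): refreshing the reference once per GB_TIMESYNC_KTIME_UPDATE keeps
 * a typical conversion within roughly one second's worth of clocks of the
 * reference pair, comfortably inside the GB_TIMESYNC_MAX_KTIME_CONVERSION
 * window of 15 seconds that gb_timesync_to_timespec() enforces.
 */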
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
				   struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}
	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);

int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
					 u64 frame_time, struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
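/*
 * Usage sketch for the exported helpers above (hypothetical consumer, not
 * part of this driver): a bundle driver holding a struct gb_interface *intf
 * could tag a sample with the current FrameTime and later convert it to a
 * kernel timestamp:
 *
 *	struct timespec ts;
 *	u64 frame_time = gb_timesync_get_frame_time_by_interface(intf);
 *
 *	if (!gb_timesync_to_timespec_by_interface(intf, frame_time, &ts))
 *		pr_info("sample taken at %lu.%09lu\n", ts.tv_sec, ts.tv_nsec);
 */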
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	ktime_get_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		goto done_nolog;
	}

	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
int __init gb_timesync_init(void)
{
	int ret = 0;

	ret = gb_timesync_platform_init();
	if (ret) {
		pr_err("timesync platform init failed\n");
		return ret;
	}

	gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

	/* Calculate nanoseconds and femtoseconds per clock */
	gb_timesync_fs_per_clock = FSEC_PER_SEC;
	do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
	gb_timesync_ns_per_clock = NSEC_PER_SEC;
	do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);

	/* Calculate the maximum number of clocks we will convert to ktime */
	gb_timesync_max_ktime_diff =
		GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

	pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
		gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
	return 0;
}
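/*
 * Worked numbers for the calculations above, assuming the 19.2MHz example
 * clock: gb_timesync_fs_per_clock = 10^15 / 19,200,000 = 52,083,333 fs,
 * gb_timesync_ns_per_clock = 10^9 / 19,200,000 = 52 ns, and
 * gb_timesync_max_ktime_diff = 15 * 19,200,000 = 288,000,000 counts.
 */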
void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}