/*
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus_trace.h"
/*
 * Minimum inter-strobe value of one millisecond is chosen because it
 * just about fits the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the number of bits
 * the SVC can fit into a 16-bit up-counter. The SVC configures the timer in
 * microseconds, so the maximum allowable value is 65535 microseconds. We clip
 * that to 10000 microseconds for the sake of using nice, round base-10 numbers
 * and because right now there is no imaginable use-case requiring anything
 * other than a one millisecond inter-strobe time, let alone anything higher
 * than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000
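
/*
 * Illustrative note (not part of the original driver): the 16-bit microsecond
 * up-counter described above gives a hardware ceiling of 2^16 - 1 = 65535 us,
 * clipped here to 10000 us with a 1000 us default. A compile-time check along
 * these lines, placed in an init function, would document that constraint;
 * its exact placement is an assumption:
 *
 *	BUILD_BUG_ON(GB_TIMESYNC_STROBE_DELAY_US > 10000);
 */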
/* Work queue timers long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG		msecs_to_jiffies(1000)
#define GB_TIMESYNC_DELAYED_WORK_SHORT		msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC		msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE		msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION	15
/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;
	struct timespec ts;
};
struct gb_timesync_svc {
	struct list_head list;
	struct list_head interface_list;
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;
	bool offset_down;
	bool print_ping;
	bool capture_ping;
	int state;

	/* Number of strobes received so far */
	int strobe;
};
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;
};
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;
};
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID		= 0,
	GB_TIMESYNC_STATE_INACTIVE		= 1,
	GB_TIMESYNC_STATE_INIT			= 2,
	GB_TIMESYNC_STATE_WAIT_SVC		= 3,
	GB_TIMESYNC_STATE_AUTHORITATIVE		= 4,
	GB_TIMESYNC_STATE_PING			= 5,
	GB_TIMESYNC_STATE_ACTIVE		= 6,
};
static void gb_timesync_ktime_timer_fn(unsigned long data);
static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
				    u64 counts)
{
	if (timesync_svc->offset_down)
		return counts - timesync_svc->frame_time_offset;
	else
		return counts + timesync_svc->frame_time_offset;
}
/*
 * This function provides the authoritative FrameTime to a calling function. It
 * is designed to be lockless and should remain that way; the caller is assumed
 * to take care of any locking it requires.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	u64 clocks = gb_timesync_platform_get_counter();

	return gb_timesync_adjust_count(timesync_svc, clocks);
}
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INIT) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	case GB_TIMESYNC_STATE_INIT:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
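
/*
 * Illustrative summary (not in the original source): the transitions accepted
 * above give the following normal flow, with INVALID always settable and
 * INACTIVE settable from any state other than INIT as the teardown/reset path:
 *
 *	INACTIVE -> INIT -> WAIT_SVC -> AUTHORITATIVE -> ACTIVE <-> PING
 */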
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
static u64 gb_timesync_diff(u64 x, u64 y)
{
	if (x > y)
		return x - y;
	else
		return y - x;
}
static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
				      u64 svc_frame_time, u64 ap_frame_time)
{
	if (svc_frame_time > ap_frame_time) {
		svc->frame_time_offset = svc_frame_time - ap_frame_time;
		svc->offset_down = false;
	} else {
		svc->frame_time_offset = ap_frame_time - svc_frame_time;
		svc->offset_down = true;
	}
}
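
/*
 * Worked example (illustrative, not from the original source): if collation
 * finds svc_frame_time = 1000600 against ap_frame_time = 1000000, then
 * frame_time_offset = 600 with offset_down = false, and a raw local count of
 * 2000000 is subsequently reported by gb_timesync_adjust_count() as 2000600.
 * Had the AP been ahead of the SVC instead, offset_down would be true and the
 * offset would be subtracted.
 */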
/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec.
 * Requires the calling context to hold timesync_svc->mutex.
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}
/*
 * Find the two pulses that best match our expected inter-strobe gap and
 * then calculate the difference between the SVC time at the second pulse
 * and the local time at the second pulse.
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
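
/*
 * Worked example (illustrative, not from the original source): at 19.2 MHz
 * gb_timesync_ns_per_clock is 52, so a measured gap of 19230 clocks scores
 * |19230 * 52 - 1000000| = 40 ns while a gap of 19180 clocks scores 2640 ns;
 * the first pulse pair is therefore preferred and the SVC/AP offset and the
 * ktime reference are taken from its second pulse.
 */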
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
					       *timesync_svc, int ret)
{
	if (ret == -EAGAIN) {
		gb_timesync_set_state(timesync_svc, timesync_svc->state);
	} else {
		pr_err("Failed to lock timesync bus %d\n", ret);
		gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
	}
}
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC.
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative().
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(interface,
							  svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
	int ret = -EINVAL;

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INVALID:
	case GB_TIMESYNC_STATE_INACTIVE:
		ret = -ENODEV;
		break;
	case GB_TIMESYNC_STATE_INIT:
	case GB_TIMESYNC_STATE_WAIT_SVC:
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
	case GB_TIMESYNC_STATE_PING:
		ret = -EAGAIN;
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		ret = 0;
		break;
	}
	return ret;
}
/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2MHz
 * as an example this means we have 52.083333~ nanoseconds per clock or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the 0.0833 component produce
 * noticeable errors. For example a time difference of one second would
 * lose 19200000 * 0.08333x nanoseconds or 1.59 milliseconds.
 *
 * In contrast calculating in femtoseconds the same example of 19200000 *
 * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
 *
 * Continuing the example of 19.2 MHz we cap the maximum error difference
 * at a worst-case 0.3 microseconds over a potential calculation window of
 * about 15 seconds, meaning you can convert a FrameTime that is <= 15
 * seconds older/younger than the reference time with a maximum error of
 * 0.2385 microseconds. Note 19.2MHz is an example frequency not a requirement.
 */
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
				   u64 frame_time, struct timespec *ts)
{
	unsigned long flags;
	u64 delta_fs, counts, sec, nsec;
	bool add;
	int ret = 0;

	memset(ts, 0x00, sizeof(*ts));
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);
	if (ret)
		goto done;

	/* Support calculating ktime upwards or downwards from the reference */
	if (frame_time < timesync_svc->ktime_data.frame_time) {
		add = false;
		counts = timesync_svc->ktime_data.frame_time - frame_time;
	} else {
		add = true;
		counts = frame_time - timesync_svc->ktime_data.frame_time;
	}

	/* Enforce the .23 of a usecond boundary @ 19.2MHz */
	if (counts > gb_timesync_max_ktime_diff) {
		ret = -EINVAL;
		goto done;
	}

	/* Determine the time difference in femtoseconds */
	delta_fs = counts * gb_timesync_fs_per_clock;

	/* Convert to seconds */
	sec = delta_fs;
	do_div(sec, NSEC_PER_SEC);
	do_div(sec, 1000000UL);

	/* Get the nanosecond remainder */
	nsec = do_div(delta_fs, sec);
	do_div(nsec, 1000000UL);

	if (add) {
		/* Add the calculated offset - overflow nanoseconds upwards */
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
		ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
		if (ts->tv_nsec >= NSEC_PER_SEC) {
			ts->tv_sec++;
			ts->tv_nsec -= NSEC_PER_SEC;
		}
	} else {
		/* Subtract the difference over/underflow as necessary */
		if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
			sec++;
			nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
			nsec = do_div(nsec, NSEC_PER_SEC);
		} else {
			nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
		}
		/* Cannot return a negative second value */
		if (sec > timesync_svc->ktime_data.ts.tv_sec) {
			ret = -EINVAL;
			goto done;
		}
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
		ts->tv_nsec = nsec;
	}
done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
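
/*
 * Worked example for the femtosecond argument above (illustrative, not from
 * the original source): at 19.2 MHz gb_timesync_fs_per_clock is 52083333, so
 * a FrameTime 96000 counts away from the reference represents
 * 96000 * 52083333 = 4999999968000 fs, i.e. about 4999999 ns against a true
 * value of 5000000 ns, whereas a truncated 52 ns-per-clock conversion would
 * yield 4992000 ns, an 8 us error over just 5 ms.
 */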
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	size_t len, off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "timesync: ping-time ap=%llu %s=%llu ",
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
			timesync_svc->timesync_hd->ping_frame_time);
	len = buflen - off;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		off += snprintf(&buf[off], len, "%s=%llu ",
				dev_name(&interface->dev),
				timesync_interface->ping_frame_time);
		len = buflen - off;
	}
	off += snprintf(&buf[off], len, "\n");
	return off;
}
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	size_t len, off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "timesync: ping-time ap=%lu.%lu ",
		       ts.tv_sec, ts.tv_nsec);
	len = buflen - off;

	/* SVC */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	gb_timesync_to_timespec(timesync_svc,
				timesync_svc->timesync_hd->ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ",
			dev_name(&hd->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		gb_timesync_to_timespec(timesync_svc,
					timesync_interface->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ",
				dev_name(&interface->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
	}
	off += snprintf(&buf[off], len, "\n");
	return off;
}
/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake
 * ping.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Have SVC generate a timesync ping */
	timesync_svc->capture_ping = true;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
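
/*
 * Illustrative sequence summary (not in the original source): a ping locks
 * the timesync bus, acquires the wake pins, has the SVC fire a single ping
 * strobe (captured by gb_timesync_irq() as ap_ping_frame_time), then reads
 * the last-event FrameTime back from the host device and from each interface
 * before releasing the pins and returning to ACTIVE.
 */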
static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
	char *buf;

	if (!timesync_svc->print_ping)
		return;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
		dev_dbg(&timesync_svc->svc->dev, "%s", buf);
		kfree(buf);
	}
}
/*
 * Perform the actual work of scheduled TimeSync logic.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion\n");
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}
/*
 * Schedule a new TimeSync INIT or PING operation serialized with respect to
 * gb_timesync_worker().
 */
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
	int ret = 0;

	if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
		return -EINVAL;

	mutex_lock(&timesync_svc->mutex);
	if (timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
	    timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
		gb_timesync_set_state_atomic(timesync_svc, state);
	} else {
		ret = -ENODEV;
	}
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}
static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
			(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}
static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
	struct gb_host_device *hd)
{
	struct gb_timesync_svc *timesync_svc;

	list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
		if (timesync_svc->svc == hd->svc)
			return timesync_svc;
	}
	return NULL;
}
static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
	struct gb_timesync_svc *timesync_svc,
	struct gb_interface *interface)
{
	struct gb_timesync_interface *timesync_interface;

	list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
		if (timesync_interface->interface == interface)
			return timesync_interface;
	}
	return NULL;
}
int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
	int ret;
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
				     size_t len, loff_t *offset, bool ktime)
{
	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
	char *buf;
	ssize_t ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	mutex_lock(&timesync_svc->mutex);
	if (list_empty(&timesync_svc->interface_list))
		ret = -ENODEV;
	timesync_svc->print_ping = false;
	mutex_unlock(&timesync_svc->mutex);
	if (ret)
		goto done;

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_PING);
	if (ret)
		goto done;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto done;
	}

	if (ktime)
		ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
	else
		ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
	kfree(buf);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}

static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};

static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};
static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
			      struct gb_host_device *hd)
{
	struct gb_timesync_host_device *timesync_hd;

	timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
	if (!timesync_hd)
		return -ENOMEM;

	WARN_ON(timesync_svc->timesync_hd);
	timesync_hd->hd = hd;
	timesync_svc->timesync_hd = timesync_hd;

	return 0;
}
static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
				  struct gb_host_device *hd)
{
	if (timesync_svc->timesync_hd->hd == hd) {
		kfree(timesync_svc->timesync_hd);
		timesync_svc->timesync_hd = NULL;
	}
}
int gb_timesync_svc_add(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	int ret;

	timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
	if (!timesync_svc)
		return -ENOMEM;

	timesync_svc->work_queue =
		create_singlethread_workqueue("gb-timesync-work_queue");
	if (!timesync_svc->work_queue) {
		kfree(timesync_svc);
		return -ENOMEM;
	}

	mutex_lock(&gb_timesync_svc_list_mutex);
	INIT_LIST_HEAD(&timesync_svc->interface_list);
	INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
	mutex_init(&timesync_svc->mutex);
	spin_lock_init(&timesync_svc->spinlock);
	init_waitqueue_head(&timesync_svc->wait_queue);

	timesync_svc->svc = svc;
	timesync_svc->frame_time_offset = 0;
	timesync_svc->capture_ping = false;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

	timesync_svc->frame_time_dentry =
		debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_time_ops);
	timesync_svc->frame_ktime_dentry =
		debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_ktime_ops);

	list_add(&timesync_svc->list, &gb_timesync_svc_list);
	ret = gb_timesync_hd_add(timesync_svc, svc->hd);
	if (ret) {
		list_del(&timesync_svc->list);
		debugfs_remove(timesync_svc->frame_ktime_dentry);
		debugfs_remove(timesync_svc->frame_time_dentry);
		destroy_workqueue(timesync_svc->work_queue);
		kfree(timesync_svc);
		goto done;
	}

	init_timer(&timesync_svc->ktime_timer);
	timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
	timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
	timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
	add_timer(&timesync_svc->ktime_timer);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	mutex_lock(&timesync_svc->mutex);

	gb_timesync_teardown(timesync_svc);
	del_timer_sync(&timesync_svc->ktime_timer);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	cancel_delayed_work_sync(&timesync_svc->delayed_work);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);

	mutex_unlock(&timesync_svc->mutex);

	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
/*
 * Add a Greybus Interface to the set of TimeSync Interfaces.
 */
int gb_timesync_interface_add(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	int ret = 0;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
	if (!timesync_interface) {
		ret = -ENOMEM;
		goto done;
	}

	mutex_lock(&timesync_svc->mutex);
	timesync_interface->interface = interface;
	list_add(&timesync_interface->list, &timesync_svc->interface_list);
	timesync_svc->strobe_mask |= 1 << interface->interface_id;
	mutex_unlock(&timesync_svc->mutex);

done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
/*
 * Remove a Greybus Interface from the set of TimeSync Interfaces.
 */
void gb_timesync_interface_remove(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
								 interface);
	if (!timesync_interface)
		goto done;

	mutex_lock(&timesync_svc->mutex);
	timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
	list_del(&timesync_interface->list);
	kfree(timesync_interface);
	mutex_unlock(&timesync_svc->mutex);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
/*
 * Give the authoritative FrameTime to the calling function. Returns zero if we
 * are not in GB_TIMESYNC_STATE_ACTIVE.
 */
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 ret = 0;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
		ret = __gb_timesync_get_frame_time(timesync_svc);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);

	return ret;
}
u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
	struct gb_timesync_svc *timesync_svc =
		(struct gb_timesync_svc *)data;
	unsigned long flags;
	u64 frame_time;
	struct timespec ts;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
		goto done;

	ktime_get_real_ts(&ts);
	frame_time = __gb_timesync_get_frame_time(timesync_svc);
	gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mod_timer(&timesync_svc->ktime_timer,
		  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
				   struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}
	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
					 u64 frame_time, struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	ktime_get_real_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		goto done_nolog;
	}

	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
int __init gb_timesync_init(void)
{
	int ret = 0;

	ret = gb_timesync_platform_init();
	if (ret) {
		pr_err("timesync platform init fail!\n");
		return ret;
	}

	gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

	/* Calculate nanoseconds and femtoseconds per clock */
	gb_timesync_fs_per_clock = FSEC_PER_SEC;
	do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
	gb_timesync_ns_per_clock = NSEC_PER_SEC;
	do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);

	/* Calculate the maximum number of clocks we will convert to ktime */
	gb_timesync_max_ktime_diff =
		GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

	pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
		gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
	return 0;
}
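
/*
 * Worked example (illustrative, not from the original source): for a
 * 19200000 Hz reference clock the divisions above yield
 * gb_timesync_ns_per_clock = 52, gb_timesync_fs_per_clock = 52083333 and
 * gb_timesync_max_ktime_diff = 15 * 19200000 = 288000000 clocks, i.e. the
 * 15 second conversion window described before gb_timesync_to_timespec().
 */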
void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}