/*
 * TimeSync API driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus.h"
#include "timesync.h"
#include "greybus_trace.h"

/*
 * The minimum inter-strobe value of one millisecond is chosen because it
 * just about matches the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the width of the
 * 16-bit up-counter the SVC uses to time strobes. The SVC configures the
 * timer in microseconds, so the largest representable value is 65535
 * microseconds. We clip that to 10000 microseconds for the sake of nice
 * round base-10 numbers, and because right now there is no imaginable
 * use-case requiring anything other than a one millisecond inter-strobe
 * time, let alone something higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US             1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US           1000

/* Work-queue timers: long, short and the SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG           msecs_to_jiffies(1000)
#define GB_TIMESYNC_DELAYED_WORK_SHORT          msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC                msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE                msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION        15
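
/*
 * Note: the FrameTime/ktime reference pair is refreshed every
 * GB_TIMESYNC_KTIME_UPDATE (one second) by the ktime timer at the bottom
 * of this file, while gb_timesync_to_timespec() accepts FrameTimes up to
 * GB_TIMESYNC_MAX_KTIME_CONVERSION (15) seconds away from that reference,
 * so a fresh reference comfortably covers the conversion window.
 */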

/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

/* Workqueue */
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);

/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
        u64 frame_time;
        struct timespec ts;
};

struct gb_timesync_svc {
        struct list_head list;
        struct list_head interface_list;
        struct gb_svc *svc;
        struct gb_timesync_host_device *timesync_hd;

        spinlock_t spinlock;    /* Per SVC spinlock to sync with ISR */
        struct mutex mutex;     /* Per SVC mutex for regular synchronization */

        struct dentry *frame_time_dentry;
        struct dentry *frame_ktime_dentry;
        struct workqueue_struct *work_queue;
        wait_queue_head_t wait_queue;
        struct delayed_work delayed_work;
        struct timer_list ktime_timer;

        /* The current local FrameTime */
        u64 frame_time_offset;
        struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
        struct gb_timesync_frame_time_data ktime_data;

        /* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
        u64 svc_ping_frame_time;
        u64 ap_ping_frame_time;

        /* Transitory settings */
        u32 strobe_mask;
        bool offset_down;
        bool print_ping;
        bool capture_ping;
        int strobe;

        /* Current state */
        int state;
};

struct gb_timesync_host_device {
        struct list_head list;
        struct gb_host_device *hd;
        u64 ping_frame_time;
};

struct gb_timesync_interface {
        struct list_head list;
        struct gb_interface *interface;
        u64 ping_frame_time;
};

enum gb_timesync_state {
        GB_TIMESYNC_STATE_INVALID = 0,
        GB_TIMESYNC_STATE_INACTIVE = 1,
        GB_TIMESYNC_STATE_INIT = 2,
        GB_TIMESYNC_STATE_WAIT_SVC = 3,
        GB_TIMESYNC_STATE_AUTHORITATIVE = 4,
        GB_TIMESYNC_STATE_PING = 5,
        GB_TIMESYNC_STATE_ACTIVE = 6,
};
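
/*
 * Valid state transitions, as enforced by gb_timesync_set_state() below
 * (any other requested transition leaves the state unchanged and fires a
 * WARN_ON()):
 *
 *      INVALID        <- any state
 *      INACTIVE       <- any state except INIT
 *      INIT           <- any state except INVALID
 *      WAIT_SVC       <- INIT
 *      AUTHORITATIVE  <- WAIT_SVC
 *      PING           <- ACTIVE
 *      ACTIVE         <- AUTHORITATIVE or PING
 */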

static void gb_timesync_ktime_timer_fn(unsigned long data);

static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
                                    u64 counts)
{
        if (timesync_svc->offset_down)
                return counts - timesync_svc->frame_time_offset;
        else
                return counts + timesync_svc->frame_time_offset;
}

/*
 * This function provides the authoritative FrameTime to a calling function.
 * It is designed to be lockless and should remain that way; the caller is
 * assumed to be state-aware.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
        u64 clocks = gb_timesync_platform_get_counter();

        return gb_timesync_adjust_count(timesync_svc, clocks);
}

static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
                                             *timesync_svc)
{
        queue_delayed_work(timesync_svc->work_queue,
                           &timesync_svc->delayed_work,
                           GB_TIMESYNC_MAX_WAIT_SVC);
}

static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
                                  int state)
{
        switch (state) {
        case GB_TIMESYNC_STATE_INVALID:
                timesync_svc->state = state;
                wake_up(&timesync_svc->wait_queue);
                break;
        case GB_TIMESYNC_STATE_INACTIVE:
                if (timesync_svc->state != GB_TIMESYNC_STATE_INIT) {
                        timesync_svc->state = state;
                        wake_up(&timesync_svc->wait_queue);
                }
                break;
        case GB_TIMESYNC_STATE_INIT:
                if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
                        timesync_svc->strobe = 0;
                        timesync_svc->frame_time_offset = 0;
                        timesync_svc->state = state;
                        cancel_delayed_work(&timesync_svc->delayed_work);
                        queue_delayed_work(timesync_svc->work_queue,
                                           &timesync_svc->delayed_work,
                                           GB_TIMESYNC_DELAYED_WORK_LONG);
                }
                break;
        case GB_TIMESYNC_STATE_WAIT_SVC:
                if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
                        timesync_svc->state = state;
                break;
        case GB_TIMESYNC_STATE_AUTHORITATIVE:
                if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
                        timesync_svc->state = state;
                        cancel_delayed_work(&timesync_svc->delayed_work);
                        queue_delayed_work(timesync_svc->work_queue,
                                           &timesync_svc->delayed_work, 0);
                }
                break;
        case GB_TIMESYNC_STATE_PING:
                if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
                        timesync_svc->state = state;
                        queue_delayed_work(timesync_svc->work_queue,
                                           &timesync_svc->delayed_work,
                                           GB_TIMESYNC_DELAYED_WORK_SHORT);
                }
                break;
        case GB_TIMESYNC_STATE_ACTIVE:
                if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
                    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
                        timesync_svc->state = state;
                        wake_up(&timesync_svc->wait_queue);
                }
                break;
        }

        if (WARN_ON(timesync_svc->state != state)) {
                pr_err("Invalid state transition %d=>%d\n",
                       timesync_svc->state, state);
        }
}

static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
                                         int state)
{
        unsigned long flags;

        spin_lock_irqsave(&timesync_svc->spinlock, flags);
        gb_timesync_set_state(timesync_svc, state);
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}

static u64 gb_timesync_diff(u64 x, u64 y)
{
        if (x > y)
                return x - y;
        else
                return y - x;
}

static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
                                      u64 svc_frame_time, u64 ap_frame_time)
{
        if (svc_frame_time > ap_frame_time) {
                svc->frame_time_offset = svc_frame_time - ap_frame_time;
                svc->offset_down = false;
        } else {
                svc->frame_time_offset = ap_frame_time - svc_frame_time;
                svc->offset_down = true;
        }
}
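
/*
 * Worked example of the offset handling above: if, at synchronization,
 * the SVC reports FrameTime 5000 while the local count is 4000,
 * gb_timesync_adjust_to_svc() stores frame_time_offset = 1000 with
 * offset_down = false, and gb_timesync_adjust_count() maps a later raw
 * count of 6000 to FrameTime 7000. Had the local count led the SVC,
 * offset_down would be true and the offset subtracted instead.
 */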

/*
 * Associate a FrameTime with a ktime timestamp represented as struct
 * timespec. Requires the calling context to hold timesync_svc->mutex.
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
                                    struct timespec ts, u64 frame_time)
{
        timesync_svc->ktime_data.ts = ts;
        timesync_svc->ktime_data.frame_time = frame_time;
}

/*
 * Find the two pulses that best match our expected inter-strobe gap and
 * then calculate the difference between the SVC time and the local time
 * at the second pulse of that pair.
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
                                           u64 *frame_time)
{
        int i = 0;
        u64 delta, ap_frame_time;
        u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
        u64 least = 0;

        for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
                delta = timesync_svc->strobe_data[i].frame_time -
                        timesync_svc->strobe_data[i - 1].frame_time;
                delta *= gb_timesync_ns_per_clock;
                delta = gb_timesync_diff(delta, strobe_delay_ns);

                if (!least || delta < least) {
                        least = delta;
                        gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
                                                  timesync_svc->strobe_data[i].frame_time);

                        ap_frame_time = timesync_svc->strobe_data[i].frame_time;
                        ap_frame_time = gb_timesync_adjust_count(timesync_svc,
                                                                 ap_frame_time);
                        gb_timesync_store_ktime(timesync_svc,
                                                timesync_svc->strobe_data[i].ts,
                                                ap_frame_time);

                        pr_debug("adjust %s local %llu svc %llu delta %llu\n",
                                 timesync_svc->offset_down ? "down" : "up",
                                 timesync_svc->strobe_data[i].frame_time,
                                 frame_time[i], delta);
                }
        }
}
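
/*
 * For illustration, with the example 19.2 MHz clock and the 1000
 * microsecond inter-strobe delay used elsewhere in this file, an ideal
 * strobe pair is 19200 clocks apart. Each candidate gap is converted to
 * nanoseconds (19200 * 52 = 998400 ns with the truncated ns-per-clock
 * value) and compared against strobe_delay_ns (1000000 ns); since every
 * pair is scored the same way, the pair with the smallest deviation still
 * wins, and its second pulse seeds both the SVC offset and the
 * FrameTime-to-ktime reference.
 */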

static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
        struct gb_timesync_interface *timesync_interface;
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_interface *interface;
        struct gb_host_device *hd;
        int ret;

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                interface = timesync_interface->interface;
                ret = gb_interface_timesync_disable(interface);
                if (ret) {
                        dev_err(&interface->dev,
                                "interface timesync_disable %d\n", ret);
                }
        }

        hd = timesync_svc->timesync_hd->hd;
        ret = hd->driver->timesync_disable(hd);
        if (ret < 0) {
                dev_err(&hd->dev, "host timesync_disable %d\n",
                        ret);
        }

        gb_svc_timesync_wake_pins_release(svc);
        gb_svc_timesync_disable(svc);
        gb_timesync_platform_unlock_bus();

        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}

static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
                                               *timesync_svc, int ret)
{
        if (ret == -EAGAIN) {
                gb_timesync_set_state(timesync_svc, timesync_svc->state);
        } else {
                pr_err("Failed to lock timesync bus %d\n", ret);
                gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
        }
}

static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_host_device *hd;
        struct gb_timesync_interface *timesync_interface;
        struct gb_interface *interface;
        u64 init_frame_time;
        unsigned long clock_rate = gb_timesync_clock_rate;
        int ret;

        /*
         * Get access to the wake pins in the AP and SVC.
         * Release these pins either in gb_timesync_teardown() or in
         * gb_timesync_authoritative().
         */
        ret = gb_timesync_platform_lock_bus(timesync_svc);
        if (ret < 0) {
                gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
                return;
        }
        ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
        if (ret) {
                dev_err(&svc->dev,
                        "gb_svc_timesync_wake_pins_acquire %d\n", ret);
                gb_timesync_teardown(timesync_svc);
                return;
        }

        /* Choose an initial time in the future */
        init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

        /* Send enable command to all relevant participants */
        list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
                            list) {
                interface = timesync_interface->interface;
                ret = gb_interface_timesync_enable(interface,
                                                   GB_TIMESYNC_MAX_STROBES,
                                                   init_frame_time,
                                                   GB_TIMESYNC_STROBE_DELAY_US,
                                                   clock_rate);
                if (ret) {
                        dev_err(&interface->dev,
                                "interface timesync_enable %d\n", ret);
                }
        }

        hd = timesync_svc->timesync_hd->hd;
        ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
                                          init_frame_time,
                                          GB_TIMESYNC_STROBE_DELAY_US,
                                          clock_rate);
        if (ret < 0) {
                dev_err(&hd->dev, "host timesync_enable %d\n",
                        ret);
        }

        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
        ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
                                     init_frame_time,
                                     GB_TIMESYNC_STROBE_DELAY_US,
                                     clock_rate);
        if (ret) {
                dev_err(&svc->dev,
                        "gb_svc_timesync_enable %d\n", ret);
                gb_timesync_teardown(timesync_svc);
                return;
        }

        /* Schedule a timeout waiting for SVC to complete strobing */
        gb_timesync_schedule_svc_timeout(timesync_svc);
}

static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_host_device *hd;
        struct gb_timesync_interface *timesync_interface;
        struct gb_interface *interface;
        u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
        int ret;

        /* Get authoritative time from SVC and adjust local clock */
        ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
        if (ret) {
                dev_err(&svc->dev,
                        "gb_svc_timesync_authoritative %d\n", ret);
                gb_timesync_teardown(timesync_svc);
                return;
        }
        gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

        /* Transmit authoritative time to downstream slaves */
        hd = timesync_svc->timesync_hd->hd;
        ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
        if (ret < 0)
                dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                interface = timesync_interface->interface;
                ret = gb_interface_timesync_authoritative(interface,
                                                          svc_frame_time);
                if (ret) {
                        dev_err(&interface->dev,
                                "interface timesync_authoritative %d\n", ret);
                }
        }

        /* Release wake pins */
        gb_svc_timesync_wake_pins_release(svc);
        gb_timesync_platform_unlock_bus();

        /* Transition to state ACTIVE */
        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

        /* Schedule a ping to verify the synchronized system time */
        timesync_svc->print_ping = true;
        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
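
/*
 * Overall synchronization flow, as implemented above and driven by
 * gb_timesync_worker() below: from INIT, gb_timesync_enable() locks the
 * bus, acquires the wake pins and sends enable commands, then waits in
 * WAIT_SVC while the SVC fires GB_TIMESYNC_MAX_STROBES strobe pulses
 * (captured in gb_timesync_irq()). gb_timesync_authoritative() then
 * collates the SVC FrameTimes against the local captures, distributes the
 * result downstream and moves through ACTIVE into a verification PING.
 */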

static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
        int ret = -EINVAL;

        switch (timesync_svc->state) {
        case GB_TIMESYNC_STATE_INVALID:
        case GB_TIMESYNC_STATE_INACTIVE:
                ret = -ENODEV;
                break;
        case GB_TIMESYNC_STATE_INIT:
        case GB_TIMESYNC_STATE_WAIT_SVC:
        case GB_TIMESYNC_STATE_AUTHORITATIVE:
        case GB_TIMESYNC_STATE_PING:
                ret = -EAGAIN;
                break;
        case GB_TIMESYNC_STATE_ACTIVE:
                ret = 0;
                break;
        }
        return ret;
}

/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2MHz
 * as an example this means we have 52.083333~ nanoseconds per clock or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the 0.0833 component produce
 * noticeable errors. For example a time difference of one second would
 * lose 19200000 * 0.08333x nanoseconds, or about 1.59 milliseconds.
 *
 * In contrast, calculating in femtoseconds the same example of 19200000 *
 * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
 *
 * Continuing the example of 19.2 MHz, we cap the maximum error difference
 * at a worst-case 0.3 microseconds over a potential calculation window of
 * about 15 seconds, meaning you can convert a FrameTime that is <= 15
 * seconds older/younger than the reference time with a maximum error of
 * 0.2385 microseconds. Note 19.2MHz is an example frequency, not a
 * requirement.
 */
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
                                   u64 frame_time, struct timespec *ts)
{
        unsigned long flags;
        u64 delta_fs, counts, sec, nsec;
        bool add;
        int ret = 0;

        memset(ts, 0x00, sizeof(*ts));
        mutex_lock(&timesync_svc->mutex);
        spin_lock_irqsave(&timesync_svc->spinlock, flags);

        ret = __gb_timesync_get_status(timesync_svc);
        if (ret)
                goto done;

        /* Support calculating ktime upwards or downwards from the reference */
        if (frame_time < timesync_svc->ktime_data.frame_time) {
                add = false;
                counts = timesync_svc->ktime_data.frame_time - frame_time;
        } else {
                add = true;
                counts = frame_time - timesync_svc->ktime_data.frame_time;
        }

        /* Enforce the 0.2385 microsecond error boundary @ 19.2 MHz */
        if (counts > gb_timesync_max_ktime_diff) {
                ret = -EINVAL;
                goto done;
        }

        /* Determine the time difference in femtoseconds */
        delta_fs = counts * gb_timesync_fs_per_clock;

        /* Convert to nanoseconds, keeping each divisor within 32 bits */
        nsec = delta_fs;
        do_div(nsec, 1000000UL);        /* fs => ns */

        /* Split into whole seconds and the nanosecond remainder */
        sec = nsec;
        nsec = do_div(sec, NSEC_PER_SEC);

        if (add) {
                /* Add the calculated offset - overflow nanoseconds upwards */
                ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
                ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
                if (ts->tv_nsec >= NSEC_PER_SEC) {
                        ts->tv_sec++;
                        ts->tv_nsec -= NSEC_PER_SEC;
                }
        } else {
                /* Subtract the difference, borrowing a second as necessary */
                if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
                        sec++;
                        nsec = timesync_svc->ktime_data.ts.tv_nsec +
                                NSEC_PER_SEC - nsec;
                } else {
                        nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
                }
                /* Cannot return a negative second value */
                if (sec > timesync_svc->ktime_data.ts.tv_sec) {
                        ret = -EINVAL;
                        goto done;
                }
                ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
                ts->tv_nsec = nsec;
        }
done:
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
        mutex_unlock(&timesync_svc->mutex);
        return ret;
}
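
/*
 * Worked example of the conversion above, assuming the example 19.2 MHz
 * clock (gb_timesync_fs_per_clock = 52083333) and a count difference of
 * 28800000 clocks, i.e. exactly 1.5 seconds:
 *
 *      delta_fs = 28800000 * 52083333 = 1499999990400000 fs
 *      total ns = delta_fs / 1000000  = 1499999990 ns
 *      sec = 1, nsec = 499999990
 *
 * The result is 10 ns short of the true 1.5 seconds, well inside the
 * sub-microsecond error bound described in the comment above that
 * function.
 */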

static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
                                         char *buf, size_t buflen)
{
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_host_device *hd;
        struct gb_timesync_interface *timesync_interface;
        struct gb_interface *interface;
        unsigned int len;
        size_t off;

        /* AP/SVC */
        off = snprintf(buf, buflen, "timesync: ping-time ap=%llu %s=%llu ",
                       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
                       timesync_svc->svc_ping_frame_time);
        len = buflen - off;

        /* APB/GPB */
        if (len < buflen) {
                hd = timesync_svc->timesync_hd->hd;
                off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
                                timesync_svc->timesync_hd->ping_frame_time);
                len = buflen - off;
        }

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                if (len < buflen) {
                        interface = timesync_interface->interface;
                        off += snprintf(&buf[off], len, "%s=%llu ",
                                        dev_name(&interface->dev),
                                        timesync_interface->ping_frame_time);
                        len = buflen - off;
                }
        }
        if (len < buflen)
                off += snprintf(&buf[off], len, "\n");
        return off;
}

static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
                                          char *buf, size_t buflen)
{
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_host_device *hd;
        struct gb_timesync_interface *timesync_interface;
        struct gb_interface *interface;
        struct timespec ts;
        unsigned int len;
        size_t off;

        /* AP */
        gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
                                &ts);
        off = snprintf(buf, buflen, "timesync: ping-time ap=%lu.%lu ",
                       ts.tv_sec, ts.tv_nsec);
        len = buflen - off;
        if (len >= buflen)
                goto done;

        /* SVC */
        gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
                                &ts);
        off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
                        ts.tv_sec, ts.tv_nsec);
        len = buflen - off;
        if (len >= buflen)
                goto done;

        /* APB/GPB */
        hd = timesync_svc->timesync_hd->hd;
        gb_timesync_to_timespec(timesync_svc,
                                timesync_svc->timesync_hd->ping_frame_time,
                                &ts);
        off += snprintf(&buf[off], len, "%s=%lu.%lu ",
                        dev_name(&hd->dev),
                        ts.tv_sec, ts.tv_nsec);
        len = buflen - off;
        if (len >= buflen)
                goto done;

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                interface = timesync_interface->interface;
                gb_timesync_to_timespec(timesync_svc,
                                        timesync_interface->ping_frame_time,
                                        &ts);
                off += snprintf(&buf[off], len, "%s=%lu.%lu ",
                                dev_name(&interface->dev),
                                ts.tv_sec, ts.tv_nsec);
                len = buflen - off;
                if (len >= buflen)
                        goto done;
        }
        off += snprintf(&buf[off], len, "\n");
done:
        return off;
}

/*
 * Send an SVC-initiated wake 'ping' to each TimeSync participant and
 * get the FrameTime each participant associated with the wake ping.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
        struct gb_svc *svc = timesync_svc->svc;
        struct gb_host_device *hd;
        struct gb_timesync_interface *timesync_interface;
        struct gb_control *control;
        u64 *ping_frame_time;
        int ret;

        /* Get access to the wake pins in the AP and SVC */
        ret = gb_timesync_platform_lock_bus(timesync_svc);
        if (ret < 0) {
                gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
                return;
        }
        ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
        if (ret) {
                dev_err(&svc->dev,
                        "gb_svc_timesync_wake_pins_acquire %d\n", ret);
                gb_timesync_teardown(timesync_svc);
                return;
        }

        /* Have SVC generate a timesync ping */
        timesync_svc->capture_ping = true;
        ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
        timesync_svc->capture_ping = false;
        if (ret) {
                dev_err(&svc->dev,
                        "gb_svc_timesync_ping %d\n", ret);
                gb_timesync_teardown(timesync_svc);
                return;
        }

        /* Get the ping FrameTime from each APB/GPB */
        hd = timesync_svc->timesync_hd->hd;
        ret = hd->driver->timesync_get_last_event(hd,
                &timesync_svc->timesync_hd->ping_frame_time);
        if (ret)
                dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                control = timesync_interface->interface->control;
                ping_frame_time = &timesync_interface->ping_frame_time;
                ret = gb_control_timesync_get_last_event(control,
                                                         ping_frame_time);
                if (ret) {
                        dev_err(&timesync_interface->interface->dev,
                                "gb_control_timesync_get_last_event %d\n", ret);
                }
        }

        /* Ping success - move to timesync active */
        gb_svc_timesync_wake_pins_release(svc);
        gb_timesync_platform_unlock_bus();
        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}

static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
        char *buf;

        if (!timesync_svc->print_ping)
                return;

        buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (buf) {
                gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
                pr_info("%s", buf);
                kfree(buf);
        }
}

/*
 * Perform the actual work of scheduled TimeSync logic.
 */
static void gb_timesync_worker(struct work_struct *work)
{
        struct delayed_work *delayed_work = to_delayed_work(work);
        struct gb_timesync_svc *timesync_svc =
                container_of(delayed_work, struct gb_timesync_svc, delayed_work);

        mutex_lock(&timesync_svc->mutex);

        switch (timesync_svc->state) {
        case GB_TIMESYNC_STATE_INIT:
                gb_timesync_enable(timesync_svc);
                break;

        case GB_TIMESYNC_STATE_WAIT_SVC:
                dev_err(&timesync_svc->svc->dev,
                        "timeout SVC strobe completion\n");
                gb_timesync_teardown(timesync_svc);
                break;

        case GB_TIMESYNC_STATE_AUTHORITATIVE:
                gb_timesync_authoritative(timesync_svc);
                break;

        case GB_TIMESYNC_STATE_PING:
                gb_timesync_ping(timesync_svc);
                gb_timesync_log_ping_time(timesync_svc);
                break;

        default:
                pr_err("Invalid state %d for delayed work\n",
                       timesync_svc->state);
                break;
        }

        mutex_unlock(&timesync_svc->mutex);
}

/*
 * Schedule a new TimeSync INIT or PING operation, serialized with respect
 * to gb_timesync_worker().
 */
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
        int ret = 0;

        if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
                return -EINVAL;

        mutex_lock(&timesync_svc->mutex);
        if (timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
            timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
                gb_timesync_set_state_atomic(timesync_svc, state);
        } else {
                ret = -ENODEV;
        }
        mutex_unlock(&timesync_svc->mutex);
        return ret;
}

static int __gb_timesync_schedule_synchronous(
        struct gb_timesync_svc *timesync_svc, int state)
{
        unsigned long flags;
        int ret;

        ret = gb_timesync_schedule(timesync_svc, state);
        if (ret)
                return ret;

        ret = wait_event_interruptible(timesync_svc->wait_queue,
                (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
                 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
                 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
        if (ret)
                return ret;

        mutex_lock(&timesync_svc->mutex);
        spin_lock_irqsave(&timesync_svc->spinlock, flags);

        ret = __gb_timesync_get_status(timesync_svc);

        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
        mutex_unlock(&timesync_svc->mutex);

        return ret;
}

static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
        struct gb_host_device *hd)
{
        struct gb_timesync_svc *timesync_svc;

        list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
                if (timesync_svc->svc == hd->svc)
                        return timesync_svc;
        }
        return NULL;
}

static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
        struct gb_timesync_svc *timesync_svc,
        struct gb_interface *interface)
{
        struct gb_timesync_interface *timesync_interface;

        list_for_each_entry(timesync_interface,
                            &timesync_svc->interface_list, list) {
                if (timesync_interface->interface == interface)
                        return timesync_interface;
        }
        return NULL;
}

int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
        int ret;
        struct gb_timesync_svc *timesync_svc;

        if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
                return 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc) {
                ret = -ENODEV;
                goto done;
        }

        ret = __gb_timesync_schedule_synchronous(timesync_svc,
                                                 GB_TIMESYNC_STATE_INIT);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);

void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
        struct gb_timesync_svc *timesync_svc;

        if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
                return;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc)
                goto done;

        gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);

static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
                                     size_t len, loff_t *offset, bool ktime)
{
        struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
        char *buf;
        ssize_t ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        mutex_lock(&timesync_svc->mutex);
        if (list_empty(&timesync_svc->interface_list))
                ret = -ENODEV;
        timesync_svc->print_ping = false;
        mutex_unlock(&timesync_svc->mutex);
        if (ret)
                goto done;

        ret = __gb_timesync_schedule_synchronous(timesync_svc,
                                                 GB_TIMESYNC_STATE_PING);
        if (ret)
                goto done;

        buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto done;
        }

        if (ktime)
                ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
        else
                ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
        if (ret > 0)
                ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
        kfree(buf);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}

static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
                                                char __user *buf,
                                                size_t len, loff_t *offset)
{
        return gb_timesync_ping_read(file, buf, len, offset, false);
}

static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
                                                 char __user *buf,
                                                 size_t len, loff_t *offset)
{
        return gb_timesync_ping_read(file, buf, len, offset, true);
}

static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
        .read = gb_timesync_ping_read_frame_time,
};

static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
        .read = gb_timesync_ping_read_frame_ktime,
};

static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
                              struct gb_host_device *hd)
{
        struct gb_timesync_host_device *timesync_hd;

        timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
        if (!timesync_hd)
                return -ENOMEM;

        WARN_ON(timesync_svc->timesync_hd);
        timesync_hd->hd = hd;
        timesync_svc->timesync_hd = timesync_hd;

        return 0;
}

static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
                                  struct gb_host_device *hd)
{
        if (timesync_svc->timesync_hd->hd == hd) {
                kfree(timesync_svc->timesync_hd);
                timesync_svc->timesync_hd = NULL;
                return;
        }
        WARN_ON(1);
}

int gb_timesync_svc_add(struct gb_svc *svc)
{
        struct gb_timesync_svc *timesync_svc;
        int ret;

        timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
        if (!timesync_svc)
                return -ENOMEM;

        timesync_svc->work_queue =
                create_singlethread_workqueue("gb-timesync-work_queue");

        if (!timesync_svc->work_queue) {
                kfree(timesync_svc);
                return -ENOMEM;
        }

        mutex_lock(&gb_timesync_svc_list_mutex);
        INIT_LIST_HEAD(&timesync_svc->interface_list);
        INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
        mutex_init(&timesync_svc->mutex);
        spin_lock_init(&timesync_svc->spinlock);
        init_waitqueue_head(&timesync_svc->wait_queue);

        timesync_svc->svc = svc;
        timesync_svc->frame_time_offset = 0;
        timesync_svc->capture_ping = false;
        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

        timesync_svc->frame_time_dentry =
                debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
                                    timesync_svc,
                                    &gb_timesync_debugfs_frame_time_ops);
        timesync_svc->frame_ktime_dentry =
                debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
                                    timesync_svc,
                                    &gb_timesync_debugfs_frame_ktime_ops);

        list_add(&timesync_svc->list, &gb_timesync_svc_list);
        ret = gb_timesync_hd_add(timesync_svc, svc->hd);
        if (ret) {
                list_del(&timesync_svc->list);
                debugfs_remove(timesync_svc->frame_ktime_dentry);
                debugfs_remove(timesync_svc->frame_time_dentry);
                destroy_workqueue(timesync_svc->work_queue);
                kfree(timesync_svc);
                goto done;
        }

        init_timer(&timesync_svc->ktime_timer);
        timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
        timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
        timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
        add_timer(&timesync_svc->ktime_timer);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);

void gb_timesync_svc_remove(struct gb_svc *svc)
{
        struct gb_timesync_svc *timesync_svc;
        struct gb_timesync_interface *timesync_interface;
        struct gb_timesync_interface *next;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
        if (!timesync_svc)
                goto done;

        mutex_lock(&timesync_svc->mutex);

        gb_timesync_teardown(timesync_svc);
        del_timer_sync(&timesync_svc->ktime_timer);

        gb_timesync_hd_remove(timesync_svc, svc->hd);
        list_for_each_entry_safe(timesync_interface, next,
                                 &timesync_svc->interface_list, list) {
                list_del(&timesync_interface->list);
                kfree(timesync_interface);
        }
        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
        debugfs_remove(timesync_svc->frame_ktime_dentry);
        debugfs_remove(timesync_svc->frame_time_dentry);
        cancel_delayed_work_sync(&timesync_svc->delayed_work);
        destroy_workqueue(timesync_svc->work_queue);
        list_del(&timesync_svc->list);

        mutex_unlock(&timesync_svc->mutex);

        kfree(timesync_svc);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);

/*
 * Add a Greybus Interface to the set of TimeSync Interfaces.
 */
int gb_timesync_interface_add(struct gb_interface *interface)
{
        struct gb_timesync_svc *timesync_svc;
        struct gb_timesync_interface *timesync_interface;
        int ret = 0;

        if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
                return 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc) {
                ret = -ENODEV;
                goto done;
        }

        timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
        if (!timesync_interface) {
                ret = -ENOMEM;
                goto done;
        }

        mutex_lock(&timesync_svc->mutex);
        timesync_interface->interface = interface;
        list_add(&timesync_interface->list, &timesync_svc->interface_list);
        timesync_svc->strobe_mask |= 1 << interface->interface_id;
        mutex_unlock(&timesync_svc->mutex);

done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);

/*
 * Remove a Greybus Interface from the set of TimeSync Interfaces.
 */
void gb_timesync_interface_remove(struct gb_interface *interface)
{
        struct gb_timesync_svc *timesync_svc;
        struct gb_timesync_interface *timesync_interface;

        if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
                return;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc)
                goto done;

        timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
                                                                 interface);
        if (!timesync_interface)
                goto done;

        mutex_lock(&timesync_svc->mutex);
        timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
        list_del(&timesync_interface->list);
        kfree(timesync_interface);
        mutex_unlock(&timesync_svc->mutex);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);

/*
 * Return the authoritative FrameTime to the caller. Returns zero if we
 * are not in GB_TIMESYNC_STATE_ACTIVE.
 */
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
        unsigned long flags;
        u64 ret;

        spin_lock_irqsave(&timesync_svc->spinlock, flags);
        if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
                ret = __gb_timesync_get_frame_time(timesync_svc);
        else
                ret = 0;
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
        return ret;
}

u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
        struct gb_timesync_svc *timesync_svc;
        u64 ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc)
                goto done;

        ret = gb_timesync_get_frame_time(timesync_svc);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);

u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
        struct gb_timesync_svc *timesync_svc;
        u64 ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
        if (!timesync_svc)
                goto done;

        ret = gb_timesync_get_frame_time(timesync_svc);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);

/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
        struct gb_timesync_svc *timesync_svc =
                (struct gb_timesync_svc *)data;
        unsigned long flags;
        u64 frame_time;
        struct timespec ts;

        spin_lock_irqsave(&timesync_svc->spinlock, flags);

        if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
                goto done;

        ktime_get_ts(&ts);
        frame_time = __gb_timesync_get_frame_time(timesync_svc);
        gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
        mod_timer(&timesync_svc->ktime_timer,
                  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}

int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
                                   struct timespec *ts)
{
        struct gb_timesync_svc *timesync_svc;
        int ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
        if (!timesync_svc) {
                ret = -ENODEV;
                goto done;
        }
        ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
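
/*
 * Illustrative usage sketch (not part of the driver): a caller holding a
 * struct gb_svc could pair the exported helpers in this file to translate
 * the current FrameTime into a kernel timestamp:
 *
 *      struct timespec ts;
 *      u64 frame_time = gb_timesync_get_frame_time_by_svc(svc);
 *
 *      if (frame_time &&
 *          !gb_timesync_to_timespec_by_svc(svc, frame_time, &ts))
 *              pr_info("frame time %llu = %lu.%09lu\n",
 *                      frame_time, ts.tv_sec, ts.tv_nsec);
 *
 * When TimeSync is not in GB_TIMESYNC_STATE_ACTIVE, the first call
 * returns zero and the second returns -ENODEV or -EAGAIN as described
 * in __gb_timesync_get_status() above.
 */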

int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
                                         u64 frame_time, struct timespec *ts)
{
        struct gb_timesync_svc *timesync_svc;
        int ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
        if (!timesync_svc) {
                ret = -ENODEV;
                goto done;
        }

        ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);

void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
        unsigned long flags;
        u64 strobe_time;
        bool strobe_is_ping = true;
        struct timespec ts;

        ktime_get_ts(&ts);
        strobe_time = __gb_timesync_get_frame_time(timesync_svc);

        spin_lock_irqsave(&timesync_svc->spinlock, flags);

        if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
                if (!timesync_svc->capture_ping)
                        goto done_nolog;
                timesync_svc->ap_ping_frame_time = strobe_time;
                goto done_log;
        } else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
                goto done_nolog;
        }

        timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
        timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

        if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
                gb_timesync_set_state(timesync_svc,
                                      GB_TIMESYNC_STATE_AUTHORITATIVE);
        }
        strobe_is_ping = false;
done_log:
        trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
                              GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);

int __init gb_timesync_init(void)
{
        int ret = 0;

        ret = gb_timesync_platform_init();
        if (ret) {
                pr_err("timesync platform init failed\n");
                return ret;
        }

        gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

        /* Calculate nanoseconds and femtoseconds per clock */
        gb_timesync_fs_per_clock = FSEC_PER_SEC;
        do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
        gb_timesync_ns_per_clock = NSEC_PER_SEC;
        do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);

        /* Calculate the maximum number of clocks we will convert to ktime */
        gb_timesync_max_ktime_diff =
                GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

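        /*
         * For the example 19.2 MHz clock used in the comments above, these
         * work out to gb_timesync_ns_per_clock = 52 (truncated from
         * 52.083333), gb_timesync_fs_per_clock = 52083333 and
         * gb_timesync_max_ktime_diff = 15 * 19200000 = 288000000 clocks.
         */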
        pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
                gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
        return 0;
}

void gb_timesync_exit(void)
{
        gb_timesync_platform_exit();
}