Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

greybus: timesync: Add gb_timesync_frame_time_to_timespec()

This patch adds gb_timesync_to_timespec_by_svc() and
gb_timesync_to_timespec_by_interface(). These routines will
convert from a given FrameTime to a ktime/timespec within an envelope of
about 17 seconds. The purpose of this routine is to enable reporting of a
FrameTime from a Module such as a Camera Module and to allow the AP to
then convert this timestamp into a Linux-native timestamp such as ktime.
This is useful and required in the v4l layer.

At 19.2MHz the accuracy of this conversion is about 0.3 femtoseconds per
count, which means at a 1 second offset from the reference the cumulative
error is about 1.59 nanoseconds. 1.59 nanoseconds is still less than 1
clock's worth of error @ 19.2MHz where each clock is 52.0833~ nanoseconds.

We're aiming for a maximum error rate of 30 nanoseconds which means at the
clock rate we are running at, the conversion from a FrameTime to a Linux
ktime/timespec can be plus-or-minus about 17 seconds from the reference
FrameTime/ktime pair before the routine will refuse to convert.

A realistic use-case for this routine is envisaged to be

- Greybus message received
- Some processing takes place - taking milliseconds
- Call into this routine is made
- Actual time between event in Module and conversion in AP < 1 second
- Error rate in conversion at 1.59 nanoseconds is less than 1 clock
@ 19.2MHz

This routine is not designed to allow for conversions for events with
large gaps between the event time and the current reference time for
conversion. Since FrameTime can be a very large integer we cannot convert
an arbitrarily large FrameTime to ktime, the feeling and objective here is
to make an over-provisioned envelope that in practical terms can never be
exceeded by expected use-cases. To convert longer gaps more work would have
to be done but ultimately some limit needs to be imposed and right now 0.3
femtoseconds per clock on MSM8994 is both accurate and generous.

Adds:
- timesync.c::gb_timesync_to_timespec_by_svc(
struct gb_svc *,
u64 frame_time,
struct timespec *ts)
- gb_svc is a pointer to a standard greybus SVC data structure
- frame_time is a system FrameTime.
- ts is an output parameter which represents the converted FrameTime
as a CLOCK_MONOTONIC timespec value.
- Returns 0 on success or a negative number indicating the type of
error on failure.

- timesync.c::gb_timesync_to_timespec_by_interface(
struct gb_interface *,
u64 frame_time,
struct timespec *ts)
- gb_interface is a pointer to a standard greybus Interface data structure
- frame_time is a system FrameTime.
- ts is an output parameter which represents the converted FrameTime
as a CLOCK_MONOTONIC timespec value.
- Returns 0 on success or a negative number indicating the type of
error on failure.

Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
Acked-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

authored by

Bryan O'Donoghue and committed by
Greg Kroah-Hartman
00fdbae1 6da7c889

+247 -25
+243 -25
drivers/staging/greybus/timesync.c
··· 7 7 * Released under the GPLv2 only. 8 8 */ 9 9 #include <linux/debugfs.h> 10 + #include <linux/hrtimer.h> 10 11 #include "greybus.h" 11 12 #include "timesync.h" 12 13 #include "greybus_trace.h" ··· 31 30 #define GB_TIMESYNC_DELAYED_WORK_LONG msecs_to_jiffies(1000) 32 31 #define GB_TIMESYNC_DELAYED_WORK_SHORT msecs_to_jiffies(1) 33 32 #define GB_TIMESYNC_MAX_WAIT_SVC msecs_to_jiffies(5000) 33 + #define GB_TIMESYNC_KTIME_UPDATE msecs_to_jiffies(1000) 34 + #define GB_TIMESYNC_MAX_KTIME_CONVERSION 15 34 35 35 - /* Reported nanoseconds per clock */ 36 + /* Reported nanoseconds/femtoseconds per clock */ 36 37 static u64 gb_timesync_ns_per_clock; 38 + static u64 gb_timesync_fs_per_clock; 39 + 40 + /* Maximum difference we will accept converting FrameTime to ktime */ 41 + static u32 gb_timesync_max_ktime_diff; 37 42 38 43 /* Reported clock rate */ 39 44 static unsigned long gb_timesync_clock_rate; ··· 52 45 53 46 /* Synchronize parallel contexts accessing a valid timesync_svc pointer */ 54 47 static DEFINE_MUTEX(gb_timesync_svc_list_mutex); 48 + 49 + /* Structure to convert from FrameTime to timespec/ktime */ 50 + struct gb_timesync_frame_time_data { 51 + u64 frame_time; 52 + struct timespec ts; 53 + }; 55 54 56 55 struct gb_timesync_svc { 57 56 struct list_head list; ··· 72 59 struct workqueue_struct *work_queue; 73 60 wait_queue_head_t wait_queue; 74 61 struct delayed_work delayed_work; 62 + struct timer_list ktime_timer; 75 63 76 64 /* The current local FrameTime */ 77 65 u64 frame_time_offset; 78 - u64 strobe_time[GB_TIMESYNC_MAX_STROBES]; 66 + struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES]; 67 + struct gb_timesync_frame_time_data ktime_data; 79 68 80 69 /* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */ 81 70 u64 svc_ping_frame_time; ··· 115 100 GB_TIMESYNC_STATE_PING = 5, 116 101 GB_TIMESYNC_STATE_ACTIVE = 6, 117 102 }; 103 + 104 + static void gb_timesync_ktime_timer_fn(unsigned long data); 118 105 119 106 static u64 
gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc, 120 107 u64 counts) ··· 238 221 } 239 222 240 223 /* 224 + * Associate a FrameTime with a ktime timestamp represented as struct timespec 225 + * Requires the calling context to hold timesync_svc->mutex 226 + */ 227 + static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc, 228 + struct timespec ts, u64 frame_time) 229 + { 230 + timesync_svc->ktime_data.ts = ts; 231 + timesync_svc->ktime_data.frame_time = frame_time; 232 + } 233 + 234 + /* 241 235 * Find the two pulses that best-match our expected inter-strobe gap and 242 236 * then calculate the difference between the SVC time at the second pulse 243 237 * to the local time at the second pulse. ··· 257 229 u64 *frame_time) 258 230 { 259 231 int i = 0; 260 - u64 delta; 232 + u64 delta, ap_frame_time; 261 233 u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC; 262 234 u64 least = 0; 263 235 264 236 for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) { 265 - delta = timesync_svc->strobe_time[i] - 266 - timesync_svc->strobe_time[i - 1]; 237 + delta = timesync_svc->strobe_data[i].frame_time - 238 + timesync_svc->strobe_data[i - 1].frame_time; 267 239 delta *= gb_timesync_ns_per_clock; 268 240 delta = gb_timesync_diff(delta, strobe_delay_ns); 269 241 270 242 if (!least || delta < least) { 271 243 least = delta; 272 244 gb_timesync_adjust_to_svc(timesync_svc, frame_time[i], 273 - timesync_svc->strobe_time[i]); 245 + timesync_svc->strobe_data[i].frame_time); 246 + 247 + ap_frame_time = timesync_svc->strobe_data[i].frame_time; 248 + ap_frame_time = gb_timesync_adjust_count(timesync_svc, 249 + ap_frame_time); 250 + gb_timesync_store_ktime(timesync_svc, 251 + timesync_svc->strobe_data[i].ts, 252 + ap_frame_time); 253 + 274 254 pr_debug("adjust %s local %llu svc %llu delta %llu\n", 275 255 timesync_svc->offset_down ? 
"down" : "up", 276 - timesync_svc->strobe_time[i], frame_time[i], 277 - delta); 256 + timesync_svc->strobe_data[i].frame_time, 257 + frame_time[i], delta); 278 258 } 279 259 } 280 260 } ··· 449 413 /* Schedule a ping to verify the synchronized system time */ 450 414 timesync_svc->print_ping = true; 451 415 gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING); 416 + } 417 + 418 + static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc) 419 + { 420 + int ret = -EINVAL; 421 + 422 + switch (timesync_svc->state) { 423 + case GB_TIMESYNC_STATE_INVALID: 424 + case GB_TIMESYNC_STATE_INACTIVE: 425 + ret = -ENODEV; 426 + break; 427 + case GB_TIMESYNC_STATE_INIT: 428 + case GB_TIMESYNC_STATE_WAIT_SVC: 429 + case GB_TIMESYNC_STATE_AUTHORITATIVE: 430 + case GB_TIMESYNC_STATE_PING: 431 + ret = -EAGAIN; 432 + break; 433 + case GB_TIMESYNC_STATE_ACTIVE: 434 + ret = 0; 435 + break; 436 + } 437 + return ret; 438 + } 439 + 440 + /* 441 + * This routine takes a FrameTime and derives the difference with-respect 442 + * to a reference FrameTime/ktime pair. It then returns the calculated 443 + * ktime based on the difference between the supplied FrameTime and 444 + * the reference FrameTime. 445 + * 446 + * The time difference is calculated to six decimal places. Taking 19.2MHz 447 + * as an example this means we have 52.083333~ nanoseconds per clock or 448 + * 52083333~ femtoseconds per clock. 449 + * 450 + * Naively taking the count difference and converting to 451 + * seconds/nanoseconds would quickly see the 0.0833 component produce 452 + * noticeable errors. For example a time difference of one second would 453 + * loose 19200000 * 0.08333x nanoseconds or 1.59 seconds. 454 + * 455 + * In contrast calculating in femtoseconds the same example of 19200000 * 456 + * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds! 
457 + * 458 + * Continuing the example of 19.2 MHz we cap the maximum error difference 459 + * at a worst-case 0.3 microseconds over a potential calculation window of 460 + * abount 15 seconds, meaning you can convert a FrameTime that is <= 15 461 + * seconds older/younger than the reference time with a maximum error of 462 + * 0.2385 useconds. Note 19.2MHz is an example frequency not a requirement. 463 + */ 464 + static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc, 465 + u64 frame_time, struct timespec *ts) 466 + { 467 + unsigned long flags; 468 + u64 delta_fs, counts; 469 + u32 sec, nsec; 470 + bool add; 471 + int ret = 0; 472 + 473 + mutex_lock(&timesync_svc->mutex); 474 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 475 + 476 + ret = __gb_timesync_get_status(timesync_svc); 477 + if (ret) 478 + goto done; 479 + 480 + /* Support calculating ktime upwards or downwards from the reference */ 481 + if (frame_time < timesync_svc->ktime_data.frame_time) { 482 + add = false; 483 + counts = timesync_svc->ktime_data.frame_time - frame_time; 484 + } else { 485 + add = true; 486 + counts = frame_time - timesync_svc->ktime_data.frame_time; 487 + } 488 + 489 + /* Enforce the .23 of a usecond boundary @ 19.2MHz */ 490 + if (counts > gb_timesync_max_ktime_diff) { 491 + ret = -EINVAL; 492 + goto done; 493 + } 494 + 495 + /* Determine the time difference in femtoseconds */ 496 + delta_fs = counts * gb_timesync_fs_per_clock; 497 + sec = delta_fs / FSEC_PER_SEC; 498 + nsec = (delta_fs % FSEC_PER_SEC) / 1000000UL; 499 + 500 + if (add) { 501 + /* Add the calculated offset - overflow nanoseconds upwards */ 502 + ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec; 503 + ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec; 504 + if (ts->tv_nsec >= NSEC_PER_SEC) { 505 + ts->tv_sec++; 506 + ts->tv_nsec -= NSEC_PER_SEC; 507 + } 508 + } else { 509 + /* Subtract the difference over/underflow as necessary */ 510 + if (nsec > 
timesync_svc->ktime_data.ts.tv_nsec) { 511 + sec++; 512 + nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec; 513 + nsec %= NSEC_PER_SEC; 514 + } else { 515 + nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec; 516 + } 517 + /* Cannot return a negative second value */ 518 + if (sec > timesync_svc->ktime_data.ts.tv_sec) { 519 + ret = -EINVAL; 520 + goto done; 521 + } 522 + ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec; 523 + ts->tv_nsec = nsec; 524 + } 525 + done: 526 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 527 + mutex_unlock(&timesync_svc->mutex); 528 + return ret; 452 529 } 453 530 454 531 static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc, ··· 765 616 mutex_lock(&timesync_svc->mutex); 766 617 spin_lock_irqsave(&timesync_svc->spinlock, flags); 767 618 768 - switch (timesync_svc->state) { 769 - case GB_TIMESYNC_STATE_INVALID: 770 - case GB_TIMESYNC_STATE_INACTIVE: 771 - ret = -ENODEV; 772 - break; 773 - case GB_TIMESYNC_STATE_INIT: 774 - case GB_TIMESYNC_STATE_WAIT_SVC: 775 - case GB_TIMESYNC_STATE_AUTHORITATIVE: 776 - case GB_TIMESYNC_STATE_PING: 777 - ret = -EAGAIN; 778 - break; 779 - case GB_TIMESYNC_STATE_ACTIVE: 780 - ret = 0; 781 - break; 782 - } 619 + ret = __gb_timesync_get_status(timesync_svc); 783 620 784 621 spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 785 622 mutex_unlock(&timesync_svc->mutex); ··· 945 810 debugfs_remove(timesync_svc->frame_time_dentry); 946 811 destroy_workqueue(timesync_svc->work_queue); 947 812 kfree(timesync_svc); 813 + goto done; 948 814 } 815 + 816 + init_timer(&timesync_svc->ktime_timer); 817 + timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn; 818 + timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE; 819 + timesync_svc->ktime_timer.data = (unsigned long)timesync_svc; 820 + add_timer(&timesync_svc->ktime_timer); 821 + done: 949 822 mutex_unlock(&gb_timesync_svc_list_mutex); 950 823 return ret; 951 824 } ··· 973 830 
mutex_lock(&timesync_svc->mutex); 974 831 975 832 gb_timesync_teardown(timesync_svc); 833 + del_timer_sync(&timesync_svc->ktime_timer); 976 834 977 835 gb_timesync_hd_remove(timesync_svc, svc->hd); 978 836 list_for_each_entry_safe(timesync_interface, next, ··· 1115 971 } 1116 972 EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc); 1117 973 974 + /* Incrementally updates the conversion base from FrameTime to ktime */ 975 + static void gb_timesync_ktime_timer_fn(unsigned long data) 976 + { 977 + struct gb_timesync_svc *timesync_svc = 978 + (struct gb_timesync_svc *)data; 979 + unsigned long flags; 980 + u64 frame_time; 981 + struct timespec ts; 982 + 983 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 984 + 985 + if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE) 986 + goto done; 987 + 988 + ktime_get_ts(&ts); 989 + frame_time = __gb_timesync_get_frame_time(timesync_svc); 990 + gb_timesync_store_ktime(timesync_svc, ts, frame_time); 991 + 992 + done: 993 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 994 + mod_timer(&timesync_svc->ktime_timer, 995 + jiffies + GB_TIMESYNC_KTIME_UPDATE); 996 + } 997 + 998 + int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time, 999 + struct timespec *ts) 1000 + { 1001 + struct gb_timesync_svc *timesync_svc; 1002 + int ret = 0; 1003 + 1004 + mutex_lock(&gb_timesync_svc_list_mutex); 1005 + timesync_svc = gb_timesync_find_timesync_svc(svc->hd); 1006 + if (!timesync_svc) { 1007 + ret = -ENODEV; 1008 + goto done; 1009 + } 1010 + ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts); 1011 + done: 1012 + mutex_unlock(&gb_timesync_svc_list_mutex); 1013 + return ret; 1014 + } 1015 + EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc); 1016 + 1017 + int gb_timesync_to_timespec_by_interface(struct gb_interface *interface, 1018 + u64 frame_time, struct timespec *ts) 1019 + { 1020 + struct gb_timesync_svc *timesync_svc; 1021 + int ret = 0; 1022 + 1023 + mutex_lock(&gb_timesync_svc_list_mutex); 1024 + 
timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 1025 + if (!timesync_svc) { 1026 + ret = -ENODEV; 1027 + goto done; 1028 + } 1029 + 1030 + ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts); 1031 + done: 1032 + mutex_unlock(&gb_timesync_svc_list_mutex); 1033 + return ret; 1034 + } 1035 + EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface); 1036 + 1118 1037 void gb_timesync_irq(struct gb_timesync_svc *timesync_svc) 1119 1038 { 1120 1039 unsigned long flags; 1121 1040 u64 strobe_time; 1122 1041 bool strobe_is_ping = true; 1042 + struct timespec ts; 1123 1043 1044 + ktime_get_ts(&ts); 1124 1045 strobe_time = __gb_timesync_get_frame_time(timesync_svc); 1125 1046 1126 1047 spin_lock_irqsave(&timesync_svc->spinlock, flags); ··· 1199 990 goto done_nolog; 1200 991 } 1201 992 1202 - timesync_svc->strobe_time[timesync_svc->strobe] = strobe_time; 993 + timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time; 994 + timesync_svc->strobe_data[timesync_svc->strobe].ts = ts; 1203 995 1204 996 if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) { 1205 997 gb_timesync_set_state(timesync_svc, ··· 1226 1016 } 1227 1017 1228 1018 gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate(); 1019 + 1020 + /* Calculate nanoseconds and femtoseconds per clock */ 1021 + gb_timesync_fs_per_clock = FSEC_PER_SEC / gb_timesync_clock_rate; 1229 1022 gb_timesync_ns_per_clock = NSEC_PER_SEC / gb_timesync_clock_rate; 1230 1023 1231 - pr_info("Time-Sync timer frequency %lu Hz\n", gb_timesync_clock_rate); 1024 + /* Calculate the maximum number of clocks we will convert to ktime */ 1025 + gb_timesync_max_ktime_diff = 1026 + GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate; 1027 + 1028 + pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n", 1029 + gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION); 1232 1030 return 0; 1233 1031 } 1234 1032
+4
drivers/staging/greybus/timesync.h
··· 31 31 32 32 u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface); 33 33 u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc); 34 + int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time, 35 + struct timespec *ts); 36 + int gb_timesync_to_timespec_by_interface(struct gb_interface *interface, 37 + u64 frame_time, struct timespec *ts); 34 38 35 39 int gb_timesync_schedule_synchronous(struct gb_interface *intf); 36 40 void gb_timesync_schedule_asynchronous(struct gb_interface *intf);