Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

greybus: timesync: Add timesync core driver

This patch adds the core timesync functionality.

0. arche-platform.c/arche-apb-ctrl.c
Modifies the platform layer to hook the incoming TIME_SYNC signal up to
the timesync strobe IRQ handler. If the arche-platform driver can't
satisfy the request for the wake-detect line, it will return -EAGAIN and
the calling work-queue must reschedule the attempt to get exclusive
access to the wake-detect pin logic. A private data field is added to
the arche-platform driver to enable passing of a timesync pointer to the
ISR responsible for synchronizing time.

1. timesync.c
A new file is added which contains all of the logic associated with
sending greybus commands to the SVC, APBx or Interfaces to enable,
disable and disseminate timing information.

2. timesync_platform.c
Any platform/arch specific code goes into timesync_platform.c.
Originally the idea was to keep the x86 and ARM arch dependencies in a
timesync_platform_arch.c file; with further refinement that is currently
not necessary. However, in case it becomes necessary to resuscitate
arch- or platform-specific methods for accessing timer resources, that
access shouldn't be part of the core timesync.c logic, so for the moment
we access these timer resources through a thin access layer in
timesync_platform.c. Ideally, expect this layer to go away long term.

Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
Acked-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

authored by

Bryan O'Donoghue and committed by
Greg Kroah-Hartman
970dc85b 698282f6

+1288 -13
+3 -1
drivers/staging/greybus/Makefile
··· 10 10 svc.o \ 11 11 svc_watchdog.o \ 12 12 bootrom.o \ 13 - operation.o 13 + operation.o \ 14 + timesync.o \ 15 + timesync_platform.o 14 16 15 17 gb-gbphy-y := gbphy.o 16 18
+15 -1
drivers/staging/greybus/arche-apb-ctrl.c
··· 90 90 } 91 91 } 92 92 93 - gpio_set_value(apb->boot_ret_gpio, 0); 93 + apb_bootret_deassert(dev); 94 94 95 95 /* On DB3 clock was not mandatory */ 96 96 if (gpio_is_valid(apb->clk_en_gpio)) ··· 182 182 apb->state = ARCHE_PLATFORM_STATE_OFF; 183 183 184 184 /* TODO: May have to send an event to SVC about this exit */ 185 + } 186 + 187 + void apb_bootret_assert(struct device *dev) 188 + { 189 + struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev); 190 + 191 + gpio_set_value(apb->boot_ret_gpio, 1); 192 + } 193 + 194 + void apb_bootret_deassert(struct device *dev) 195 + { 196 + struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev); 197 + 198 + gpio_set_value(apb->boot_ret_gpio, 0); 185 199 } 186 200 187 201 int apb_ctrl_coldboot(struct device *dev)
+62 -9
drivers/staging/greybus/arche-platform.c
··· 22 22 #include <linux/suspend.h> 23 23 #include <linux/time.h> 24 24 #include "arche_platform.h" 25 + #include "greybus.h" 25 26 26 27 #include <linux/usb/usb3613.h> 27 28 ··· 35 34 WD_STATE_STANDBYBOOT_TRIG, /* As of now not used ?? */ 36 35 WD_STATE_COLDBOOT_START, /* Cold boot process started */ 37 36 WD_STATE_STANDBYBOOT_START, /* Not used */ 37 + WD_STATE_TIMESYNC, 38 38 }; 39 39 40 40 struct arche_platform_drvdata { ··· 59 57 int wake_detect_irq; 60 58 spinlock_t wake_lock; /* Protect wake_detect_state */ 61 59 struct mutex platform_state_mutex; /* Protect state */ 60 + wait_queue_head_t wq; /* WQ for arche_pdata->state */ 62 61 unsigned long wake_detect_start; 63 62 struct notifier_block pm_notifier; 64 63 65 64 struct device *dev; 65 + struct gb_timesync_svc *timesync_svc_pdata; 66 66 }; 67 67 68 - /* Requires calling context to hold arche_pdata->spinlock */ 68 + static int arche_apb_bootret_assert(struct device *dev, void *data) 69 + { 70 + apb_bootret_assert(dev); 71 + return 0; 72 + } 73 + 74 + static int arche_apb_bootret_deassert(struct device *dev, void *data) 75 + { 76 + apb_bootret_deassert(dev); 77 + return 0; 78 + } 79 + 80 + /* Requires calling context to hold arche_pdata->platform_state_mutex */ 69 81 static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata, 70 82 enum arche_platform_state state) 71 83 { ··· 110 94 * satisfy the requested state-transition or -EINVAL for all other 111 95 * state-transition requests. 
112 96 */ 113 - int arche_platform_change_state(enum arche_platform_state state) 97 + int arche_platform_change_state(enum arche_platform_state state, 98 + struct gb_timesync_svc *timesync_svc_pdata) 114 99 { 115 100 struct arche_platform_drvdata *arche_pdata; 116 101 struct platform_device *pdev; ··· 135 118 136 119 mutex_lock(&arche_pdata->platform_state_mutex); 137 120 spin_lock_irqsave(&arche_pdata->wake_lock, flags); 138 - if (arche_pdata->wake_detect_state != WD_STATE_IDLE) { 139 - dev_err(arche_pdata->dev, 140 - "driver busy with wake/detect line ops\n"); 141 - goto exit; 142 - } 143 121 144 122 if (arche_pdata->state == state) { 145 123 ret = 0; ··· 143 131 144 132 switch (state) { 145 133 case ARCHE_PLATFORM_STATE_TIME_SYNC: 146 - disable_irq(arche_pdata->wake_detect_irq); 134 + if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) { 135 + ret = -EINVAL; 136 + goto exit; 137 + } 138 + if (arche_pdata->wake_detect_state != WD_STATE_IDLE) { 139 + dev_err(arche_pdata->dev, 140 + "driver busy with wake/detect line ops\n"); 141 + goto exit; 142 + } 143 + device_for_each_child(arche_pdata->dev, NULL, 144 + arche_apb_bootret_assert); 145 + arche_pdata->wake_detect_state = WD_STATE_TIMESYNC; 147 146 break; 148 147 case ARCHE_PLATFORM_STATE_ACTIVE: 149 - enable_irq(arche_pdata->wake_detect_irq); 148 + if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) { 149 + ret = -EINVAL; 150 + goto exit; 151 + } 152 + device_for_each_child(arche_pdata->dev, NULL, 153 + arche_apb_bootret_deassert); 154 + arche_pdata->wake_detect_state = WD_STATE_IDLE; 150 155 break; 151 156 case ARCHE_PLATFORM_STATE_OFF: 152 157 case ARCHE_PLATFORM_STATE_STANDBY: ··· 176 147 "invalid state transition request\n"); 177 148 goto exit; 178 149 } 150 + arche_pdata->timesync_svc_pdata = timesync_svc_pdata; 179 151 arche_platform_set_state(arche_pdata, state); 152 + if (state == ARCHE_PLATFORM_STATE_ACTIVE) 153 + wake_up(&arche_pdata->wq); 154 + 180 155 ret = 0; 181 156 exit: 182 157 
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); ··· 285 252 286 253 spin_lock_irqsave(&arche_pdata->wake_lock, flags); 287 254 255 + if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) { 256 + gb_timesync_irq(arche_pdata->timesync_svc_pdata); 257 + goto exit; 258 + } 259 + 288 260 if (gpio_get_value(arche_pdata->wake_detect_gpio)) { 289 261 /* wake/detect rising */ 290 262 ··· 331 293 } 332 294 } 333 295 296 + exit: 334 297 spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); 335 298 336 299 return IRQ_HANDLED; ··· 441 402 struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev); 442 403 int ret = 0; 443 404 405 + retry: 444 406 mutex_lock(&arche_pdata->platform_state_mutex); 407 + if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) { 408 + mutex_unlock(&arche_pdata->platform_state_mutex); 409 + ret = wait_event_interruptible( 410 + arche_pdata->wq, 411 + arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC); 412 + if (ret) 413 + return ret; 414 + goto retry; 415 + } 445 416 446 417 if (sysfs_streq(buf, "off")) { 447 418 if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF) ··· 659 610 660 611 spin_lock_init(&arche_pdata->wake_lock); 661 612 mutex_init(&arche_pdata->platform_state_mutex); 613 + init_waitqueue_head(&arche_pdata->wq); 662 614 arche_pdata->wake_detect_irq = 663 615 gpio_to_irq(arche_pdata->wake_detect_gpio); 664 616 ··· 702 652 dev_err(dev, "failed to register pm notifier %d\n", ret); 703 653 goto err_populate; 704 654 } 655 + 656 + /* Register callback pointer */ 657 + arche_platform_change_state_cb = arche_platform_change_state; 705 658 706 659 dev_info(dev, "Device registered successfully\n"); 707 660 return 0;
+8 -1
drivers/staging/greybus/arche_platform.h
··· 10 10 #ifndef __ARCHE_PLATFORM_H 11 11 #define __ARCHE_PLATFORM_H 12 12 13 + #include "timesync.h" 14 + 13 15 enum arche_platform_state { 14 16 ARCHE_PLATFORM_STATE_OFF, 15 17 ARCHE_PLATFORM_STATE_ACTIVE, ··· 20 18 ARCHE_PLATFORM_STATE_TIME_SYNC, 21 19 }; 22 20 23 - int arche_platform_change_state(enum arche_platform_state state); 21 + int arche_platform_change_state(enum arche_platform_state state, 22 + struct gb_timesync_svc *pdata); 24 23 24 + extern int (*arche_platform_change_state_cb)(enum arche_platform_state state, 25 + struct gb_timesync_svc *pdata); 25 26 int __init arche_apb_init(void); 26 27 void __exit arche_apb_exit(void); 27 28 ··· 33 28 int apb_ctrl_fw_flashing(struct device *dev); 34 29 int apb_ctrl_standby_boot(struct device *dev); 35 30 void apb_ctrl_poweroff(struct device *dev); 31 + void apb_bootret_assert(struct device *dev); 32 + void apb_bootret_deassert(struct device *dev); 36 33 37 34 #endif /* __ARCHE_PLATFORM_H */
+1 -1
drivers/staging/greybus/greybus.h
··· 33 33 #include "bundle.h" 34 34 #include "connection.h" 35 35 #include "operation.h" 36 - 36 + #include "timesync.h" 37 37 38 38 /* Matches up with the Greybus Protocol specification document */ 39 39 #define GREYBUS_VERSION_MAJOR 0x00
+55
drivers/staging/greybus/interface.c
··· 865 865 intf->enabled = false; 866 866 } 867 867 868 + /* 869 + * Enable TimeSync on an Interface control connection. 870 + * 871 + * Locking: Takes and releases the interface mutex. 872 + */ 873 + int gb_interface_timesync_enable(struct gb_interface *intf, u8 count, 874 + u64 frame_time, u32 strobe_delay, u32 refclk) 875 + { 876 + int ret = -ENODEV; 877 + 878 + mutex_lock(&intf->mutex); 879 + if (intf->enabled) { 880 + ret = gb_control_timesync_enable(intf->control, count, 881 + frame_time, strobe_delay, 882 + refclk); 883 + } 884 + mutex_unlock(&intf->mutex); 885 + return ret; 886 + } 887 + 888 + /* 889 + * Disable TimeSync on an Interface control connection. 890 + * 891 + * Locking: Takes and releases the interface mutex. 892 + */ 893 + int gb_interface_timesync_disable(struct gb_interface *intf) 894 + { 895 + int ret = -ENODEV; 896 + 897 + mutex_lock(&intf->mutex); 898 + if (intf->enabled) 899 + ret = gb_control_timesync_disable(intf->control); 900 + mutex_unlock(&intf->mutex); 901 + return ret; 902 + } 903 + 904 + /* 905 + * Transmit the Authoritative FrameTime via an Interface control connection. 906 + * 907 + * Locking: Takes and releases the interface mutex. 908 + */ 909 + int gb_interface_timesync_authoritative(struct gb_interface *intf, 910 + u64 *frame_time) 911 + { 912 + int ret = -ENODEV; 913 + 914 + mutex_lock(&intf->mutex); 915 + if (intf->enabled) { 916 + ret = gb_control_timesync_authoritative(intf->control, 917 + frame_time); 918 + } 919 + mutex_unlock(&intf->mutex); 920 + return ret; 921 + } 922 + 868 923 /* Register an interface. */ 869 924 int gb_interface_add(struct gb_interface *intf) 870 925 {
+5
drivers/staging/greybus/interface.h
··· 58 58 void gb_interface_deactivate(struct gb_interface *intf); 59 59 int gb_interface_enable(struct gb_interface *intf); 60 60 void gb_interface_disable(struct gb_interface *intf); 61 + int gb_interface_timesync_enable(struct gb_interface *intf, u8 count, 62 + u64 frame_time, u32 strobe_delay, u32 refclk); 63 + int gb_interface_timesync_authoritative(struct gb_interface *intf, 64 + u64 *frame_time); 65 + int gb_interface_timesync_disable(struct gb_interface *intf); 61 66 int gb_interface_add(struct gb_interface *intf); 62 67 void gb_interface_del(struct gb_interface *intf); 63 68 void gb_interface_put(struct gb_interface *intf);
+1021
drivers/staging/greybus/timesync.c
··· 1 + /* 2 + * TimeSync API driver. 3 + * 4 + * Copyright 2016 Google Inc. 5 + * Copyright 2016 Linaro Ltd. 6 + * 7 + * Released under the GPLv2 only. 8 + */ 9 + #include <linux/debugfs.h> 10 + #include "greybus.h" 11 + #include "timesync.h" 12 + 13 + /* 14 + * Minimum inter-strobe value of one millisecond is chosen because it 15 + * just-about fits the common definition of a jiffy. 16 + * 17 + * Maximum value OTOH is constrained by the number of bits the SVC can fit 18 + * into a 16 bit up-counter. The SVC configures the timer in microseconds 19 + * so the maximum allowable value is 65535 microseconds. We clip that value 20 + * to 10000 microseconds for the sake of using nice round base 10 numbers 21 + * and since right-now there's no imaginable use-case requiring anything 22 + * other than a one millisecond inter-strobe time, let alone something 23 + * higher than ten milliseconds. 24 + */ 25 + #define GB_TIMESYNC_STROBE_DELAY_US 1000 26 + #define GB_TIMESYNC_DEFAULT_OFFSET_US 1000 27 + 28 + /* Work queue timers long, short and SVC strobe timeout */ 29 + #define GB_TIMESYNC_DELAYED_WORK_LONG msecs_to_jiffies(1000) 30 + #define GB_TIMESYNC_DELAYED_WORK_SHORT msecs_to_jiffies(1) 31 + #define GB_TIMESYNC_MAX_WAIT_SVC msecs_to_jiffies(5000) 32 + 33 + /* Reported nanoseconds per clock */ 34 + static u64 gb_timesync_ns_per_clock; 35 + 36 + /* Reported clock rate */ 37 + static unsigned long gb_timesync_clock_rate; 38 + 39 + /* Workqueue */ 40 + static void gb_timesync_worker(struct work_struct *work); 41 + 42 + /* List of SVCs with one FrameTime per SVC */ 43 + static LIST_HEAD(gb_timesync_svc_list); 44 + 45 + /* Synchronize parallel contexts accessing a valid timesync_svc pointer */ 46 + static DEFINE_MUTEX(gb_timesync_svc_list_mutex); 47 + 48 + struct gb_timesync_svc { 49 + struct list_head list; 50 + struct list_head interface_list; 51 + struct gb_svc *svc; 52 + struct gb_timesync_host_device *timesync_hd; 53 + 54 + spinlock_t spinlock; /* Per SVC spinlock to sync 
with ISR */ 55 + struct mutex mutex; /* Per SVC mutex for regular synchronization */ 56 + 57 + struct dentry *frame_time_dentry; 58 + struct workqueue_struct *work_queue; 59 + wait_queue_head_t wait_queue; 60 + struct delayed_work delayed_work; 61 + 62 + /* The current local FrameTime */ 63 + u64 frame_time_offset; 64 + u64 strobe_time[GB_TIMESYNC_MAX_STROBES]; 65 + 66 + /* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */ 67 + u64 svc_ping_frame_time; 68 + u64 ap_ping_frame_time; 69 + 70 + /* Transitory settings */ 71 + u32 strobe_mask; 72 + bool offset_down; 73 + bool print_ping; 74 + bool capture_ping; 75 + int strobe; 76 + 77 + /* Current state */ 78 + int state; 79 + }; 80 + 81 + struct gb_timesync_host_device { 82 + struct list_head list; 83 + struct gb_host_device *hd; 84 + u64 ping_frame_time; 85 + }; 86 + 87 + struct gb_timesync_interface { 88 + struct list_head list; 89 + struct gb_interface *interface; 90 + u64 ping_frame_time; 91 + }; 92 + 93 + enum gb_timesync_state { 94 + GB_TIMESYNC_STATE_INVALID = 0, 95 + GB_TIMESYNC_STATE_INACTIVE = 1, 96 + GB_TIMESYNC_STATE_INIT = 2, 97 + GB_TIMESYNC_STATE_WAIT_SVC = 3, 98 + GB_TIMESYNC_STATE_AUTHORITATIVE = 4, 99 + GB_TIMESYNC_STATE_PING = 5, 100 + GB_TIMESYNC_STATE_ACTIVE = 6, 101 + }; 102 + 103 + static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc, 104 + u64 counts) 105 + { 106 + if (timesync_svc->offset_down) 107 + return counts - timesync_svc->frame_time_offset; 108 + else 109 + return counts + timesync_svc->frame_time_offset; 110 + } 111 + 112 + /* 113 + * This function provides the authoritative FrameTime to a calling function. It 114 + * is designed to be lockless and should remain that way the caller is assumed 115 + * to be state-aware. 
116 + */ 117 + static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc) 118 + { 119 + u64 clocks = gb_timesync_platform_get_counter(); 120 + 121 + return gb_timesync_adjust_count(timesync_svc, clocks); 122 + } 123 + 124 + static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc 125 + *timesync_svc) 126 + { 127 + queue_delayed_work(timesync_svc->work_queue, 128 + &timesync_svc->delayed_work, 129 + GB_TIMESYNC_MAX_WAIT_SVC); 130 + } 131 + 132 + static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc, 133 + int state) 134 + { 135 + switch (state) { 136 + case GB_TIMESYNC_STATE_INVALID: 137 + timesync_svc->state = state; 138 + wake_up(&timesync_svc->wait_queue); 139 + break; 140 + case GB_TIMESYNC_STATE_INACTIVE: 141 + if (timesync_svc->state != GB_TIMESYNC_STATE_INIT) { 142 + timesync_svc->state = state; 143 + wake_up(&timesync_svc->wait_queue); 144 + } 145 + break; 146 + case GB_TIMESYNC_STATE_INIT: 147 + if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) { 148 + timesync_svc->strobe = 0; 149 + timesync_svc->frame_time_offset = 0; 150 + timesync_svc->state = state; 151 + cancel_delayed_work(&timesync_svc->delayed_work); 152 + queue_delayed_work(timesync_svc->work_queue, 153 + &timesync_svc->delayed_work, 154 + GB_TIMESYNC_DELAYED_WORK_LONG); 155 + } 156 + break; 157 + case GB_TIMESYNC_STATE_WAIT_SVC: 158 + if (timesync_svc->state == GB_TIMESYNC_STATE_INIT) 159 + timesync_svc->state = state; 160 + break; 161 + case GB_TIMESYNC_STATE_AUTHORITATIVE: 162 + if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) { 163 + timesync_svc->state = state; 164 + cancel_delayed_work(&timesync_svc->delayed_work); 165 + queue_delayed_work(timesync_svc->work_queue, 166 + &timesync_svc->delayed_work, 0); 167 + } 168 + break; 169 + case GB_TIMESYNC_STATE_PING: 170 + if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) { 171 + timesync_svc->state = state; 172 + queue_delayed_work(timesync_svc->work_queue, 173 + 
&timesync_svc->delayed_work, 174 + GB_TIMESYNC_DELAYED_WORK_SHORT); 175 + } 176 + break; 177 + case GB_TIMESYNC_STATE_ACTIVE: 178 + if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE || 179 + timesync_svc->state == GB_TIMESYNC_STATE_PING) { 180 + timesync_svc->state = state; 181 + wake_up(&timesync_svc->wait_queue); 182 + } 183 + break; 184 + } 185 + 186 + if (WARN_ON(timesync_svc->state != state)) { 187 + pr_err("Invalid state transition %d=>%d\n", 188 + timesync_svc->state, state); 189 + } 190 + } 191 + 192 + static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc, 193 + int state) 194 + { 195 + unsigned long flags; 196 + 197 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 198 + gb_timesync_set_state(timesync_svc, state); 199 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 200 + } 201 + 202 + static u64 gb_timesync_diff(u64 x, u64 y) 203 + { 204 + if (x > y) 205 + return x - y; 206 + else 207 + return y - x; 208 + } 209 + 210 + static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc, 211 + u64 svc_frame_time, u64 ap_frame_time) 212 + { 213 + if (svc_frame_time > ap_frame_time) { 214 + svc->frame_time_offset = svc_frame_time - ap_frame_time; 215 + svc->offset_down = false; 216 + } else { 217 + svc->frame_time_offset = ap_frame_time - svc_frame_time; 218 + svc->offset_down = true; 219 + } 220 + } 221 + 222 + /* 223 + * Find the two pulses that best-match our expected inter-strobe gap and 224 + * then calculate the difference between the SVC time at the second pulse 225 + * to the local time at the second pulse. 
226 + */ 227 + static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc, 228 + u64 *frame_time) 229 + { 230 + int i = 0; 231 + u64 delta; 232 + u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC; 233 + u64 least = 0; 234 + 235 + for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) { 236 + delta = timesync_svc->strobe_time[i] - 237 + timesync_svc->strobe_time[i - 1]; 238 + delta *= gb_timesync_ns_per_clock; 239 + delta = gb_timesync_diff(delta, strobe_delay_ns); 240 + 241 + if (!least || delta < least) { 242 + least = delta; 243 + gb_timesync_adjust_to_svc(timesync_svc, frame_time[i], 244 + timesync_svc->strobe_time[i]); 245 + pr_debug("adjust %s local %llu svc %llu delta %llu\n", 246 + timesync_svc->offset_down ? "down" : "up", 247 + timesync_svc->strobe_time[i], frame_time[i], 248 + delta); 249 + } 250 + } 251 + } 252 + 253 + static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc) 254 + { 255 + struct gb_timesync_interface *timesync_interface; 256 + struct gb_svc *svc = timesync_svc->svc; 257 + struct gb_interface *interface; 258 + struct gb_host_device *hd; 259 + int ret; 260 + 261 + list_for_each_entry(timesync_interface, 262 + &timesync_svc->interface_list, list) { 263 + interface = timesync_interface->interface; 264 + ret = gb_interface_timesync_disable(interface); 265 + if (ret) { 266 + dev_err(&interface->dev, 267 + "interface timesync_disable %d\n", ret); 268 + } 269 + } 270 + 271 + hd = timesync_svc->timesync_hd->hd; 272 + ret = hd->driver->timesync_disable(hd); 273 + if (ret < 0) { 274 + dev_err(&hd->dev, "host timesync_disable %d\n", 275 + ret); 276 + } 277 + 278 + gb_svc_timesync_wake_pins_release(svc); 279 + gb_svc_timesync_disable(svc); 280 + gb_timesync_platform_unlock_bus(); 281 + 282 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE); 283 + } 284 + 285 + static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc 286 + *timesync_svc, int ret) 287 + { 288 + if (ret == 
-EAGAIN) { 289 + gb_timesync_set_state(timesync_svc, timesync_svc->state); 290 + } else { 291 + pr_err("Failed to lock timesync bus %d\n", ret); 292 + gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE); 293 + } 294 + } 295 + 296 + static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc) 297 + { 298 + struct gb_svc *svc = timesync_svc->svc; 299 + struct gb_host_device *hd; 300 + struct gb_timesync_interface *timesync_interface; 301 + struct gb_interface *interface; 302 + u64 init_frame_time; 303 + unsigned long clock_rate = gb_timesync_clock_rate; 304 + int ret; 305 + 306 + /* 307 + * Get access to the wake pins in the AP and SVC 308 + * Release these pins either in gb_timesync_teardown() or in 309 + * gb_timesync_authoritative() 310 + */ 311 + ret = gb_timesync_platform_lock_bus(timesync_svc); 312 + if (ret < 0) { 313 + gb_timesync_platform_lock_bus_fail(timesync_svc, ret); 314 + return; 315 + } 316 + ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask); 317 + if (ret) { 318 + dev_err(&svc->dev, 319 + "gb_svc_timesync_wake_pins_acquire %d\n", ret); 320 + gb_timesync_teardown(timesync_svc); 321 + return; 322 + } 323 + 324 + /* Choose an initial time in the future */ 325 + init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL; 326 + 327 + /* Send enable command to all relevant participants */ 328 + list_for_each_entry(timesync_interface, &timesync_svc->interface_list, 329 + list) { 330 + interface = timesync_interface->interface; 331 + ret = gb_interface_timesync_enable(interface, 332 + GB_TIMESYNC_MAX_STROBES, 333 + init_frame_time, 334 + GB_TIMESYNC_STROBE_DELAY_US, 335 + clock_rate); 336 + if (ret) { 337 + dev_err(&interface->dev, 338 + "interface timesync_enable %d\n", ret); 339 + } 340 + } 341 + 342 + hd = timesync_svc->timesync_hd->hd; 343 + ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES, 344 + init_frame_time, 345 + GB_TIMESYNC_STROBE_DELAY_US, 346 + clock_rate); 347 + if (ret < 0) { 
348 + dev_err(&hd->dev, "host timesync_enable %d\n", 349 + ret); 350 + } 351 + 352 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC); 353 + ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES, 354 + init_frame_time, 355 + GB_TIMESYNC_STROBE_DELAY_US, 356 + clock_rate); 357 + if (ret) { 358 + dev_err(&svc->dev, 359 + "gb_svc_timesync_enable %d\n", ret); 360 + gb_timesync_teardown(timesync_svc); 361 + return; 362 + } 363 + 364 + /* Schedule a timeout waiting for SVC to complete strobing */ 365 + gb_timesync_schedule_svc_timeout(timesync_svc); 366 + } 367 + 368 + static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc) 369 + { 370 + struct gb_svc *svc = timesync_svc->svc; 371 + struct gb_host_device *hd; 372 + struct gb_timesync_interface *timesync_interface; 373 + struct gb_interface *interface; 374 + u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES]; 375 + int ret; 376 + 377 + /* Get authoritative time from SVC and adjust local clock */ 378 + ret = gb_svc_timesync_authoritative(svc, svc_frame_time); 379 + if (ret) { 380 + dev_err(&svc->dev, 381 + "gb_svc_timesync_authoritative %d\n", ret); 382 + gb_timesync_teardown(timesync_svc); 383 + return; 384 + } 385 + gb_timesync_collate_frame_time(timesync_svc, svc_frame_time); 386 + 387 + /* Transmit authoritative time to downstream slaves */ 388 + hd = timesync_svc->timesync_hd->hd; 389 + ret = hd->driver->timesync_authoritative(hd, svc_frame_time); 390 + if (ret < 0) 391 + dev_err(&hd->dev, "host timesync_authoritative %d\n", ret); 392 + 393 + list_for_each_entry(timesync_interface, 394 + &timesync_svc->interface_list, list) { 395 + interface = timesync_interface->interface; 396 + ret = gb_interface_timesync_authoritative( 397 + interface, 398 + svc_frame_time); 399 + if (ret) { 400 + dev_err(&interface->dev, 401 + "interface timesync_authoritative %d\n", ret); 402 + } 403 + } 404 + 405 + /* Release wake pins */ 406 + gb_svc_timesync_wake_pins_release(svc); 407 + 
gb_timesync_platform_unlock_bus(); 408 + 409 + /* Transition to state ACTIVE */ 410 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE); 411 + 412 + /* Schedule a ping to verify the synchronized system time */ 413 + timesync_svc->print_ping = true; 414 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING); 415 + } 416 + 417 + static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc, 418 + char *buf, size_t buflen) 419 + { 420 + struct gb_svc *svc = timesync_svc->svc; 421 + struct gb_host_device *hd; 422 + struct gb_timesync_interface *timesync_interface; 423 + struct gb_interface *interface; 424 + unsigned int len; 425 + size_t off; 426 + 427 + /* AP/SVC */ 428 + memset(buf, 0x00, buflen); 429 + off = snprintf(buf, buflen, "timesync: ping-time ap=%llu %s=%llu ", 430 + timesync_svc->ap_ping_frame_time, dev_name(&svc->dev), 431 + timesync_svc->svc_ping_frame_time); 432 + len = buflen - off; 433 + 434 + /* APB/GPB */ 435 + if (len < buflen) { 436 + hd = timesync_svc->timesync_hd->hd; 437 + off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev), 438 + timesync_svc->timesync_hd->ping_frame_time); 439 + len = buflen - off; 440 + } 441 + 442 + list_for_each_entry(timesync_interface, 443 + &timesync_svc->interface_list, list) { 444 + if (len < buflen) { 445 + interface = timesync_interface->interface; 446 + off += snprintf(&buf[off], len, "%s=%llu ", 447 + dev_name(&interface->dev), 448 + timesync_interface->ping_frame_time); 449 + len = buflen - off; 450 + } 451 + } 452 + if (len < buflen) 453 + off += snprintf(&buf[off], len, "\n"); 454 + return off; 455 + } 456 + 457 + /* 458 + * Send an SVC initiated wake 'ping' to each TimeSync participant. 459 + * Get the FrameTime from each participant associated with the wake 460 + * ping. 
461 + */ 462 + static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc) 463 + { 464 + struct gb_svc *svc = timesync_svc->svc; 465 + struct gb_host_device *hd; 466 + struct gb_timesync_interface *timesync_interface; 467 + struct gb_control *control; 468 + u64 *ping_frame_time; 469 + int ret; 470 + 471 + /* Get access to the wake pins in the AP and SVC */ 472 + ret = gb_timesync_platform_lock_bus(timesync_svc); 473 + if (ret < 0) { 474 + gb_timesync_platform_lock_bus_fail(timesync_svc, ret); 475 + return; 476 + } 477 + ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask); 478 + if (ret) { 479 + dev_err(&svc->dev, 480 + "gb_svc_timesync_wake_pins_acquire %d\n", ret); 481 + gb_timesync_teardown(timesync_svc); 482 + return; 483 + } 484 + 485 + /* Have SVC generate a timesync ping */ 486 + timesync_svc->capture_ping = true; 487 + ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time); 488 + timesync_svc->capture_ping = false; 489 + if (ret) { 490 + dev_err(&svc->dev, 491 + "gb_svc_timesync_ping %d\n", ret); 492 + gb_timesync_teardown(timesync_svc); 493 + return; 494 + } 495 + 496 + /* Get the ping FrameTime from each APB/GPB */ 497 + hd = timesync_svc->timesync_hd->hd; 498 + ret = hd->driver->timesync_get_last_event(hd, 499 + &timesync_svc->timesync_hd->ping_frame_time); 500 + if (ret) 501 + dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret); 502 + 503 + list_for_each_entry(timesync_interface, 504 + &timesync_svc->interface_list, list) { 505 + control = timesync_interface->interface->control; 506 + ping_frame_time = &timesync_interface->ping_frame_time; 507 + ret = gb_control_timesync_get_last_event(control, 508 + ping_frame_time); 509 + if (ret) { 510 + dev_err(&timesync_interface->interface->dev, 511 + "gb_control_timesync_get_last_event %d\n", ret); 512 + } 513 + } 514 + 515 + /* Ping success - move to timesync active */ 516 + gb_svc_timesync_wake_pins_release(svc); 517 + gb_timesync_platform_unlock_bus(); 518 + 
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE); 519 + } 520 + 521 + static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc) 522 + { 523 + char *buf; 524 + 525 + if (!timesync_svc->print_ping) 526 + return; 527 + 528 + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 529 + if (buf) { 530 + gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE); 531 + pr_info("%s", buf); 532 + kfree(buf); 533 + } 534 + } 535 + 536 + /* 537 + * Perform the actual work of scheduled TimeSync logic. 538 + */ 539 + static void gb_timesync_worker(struct work_struct *work) 540 + { 541 + struct delayed_work *delayed_work = to_delayed_work(work); 542 + struct gb_timesync_svc *timesync_svc = 543 + container_of(delayed_work, struct gb_timesync_svc, delayed_work); 544 + 545 + mutex_lock(&timesync_svc->mutex); 546 + 547 + switch (timesync_svc->state) { 548 + case GB_TIMESYNC_STATE_INIT: 549 + gb_timesync_enable(timesync_svc); 550 + break; 551 + 552 + case GB_TIMESYNC_STATE_WAIT_SVC: 553 + dev_err(&timesync_svc->svc->dev, 554 + "timeout SVC strobe completion\n"); 555 + gb_timesync_teardown(timesync_svc); 556 + break; 557 + 558 + case GB_TIMESYNC_STATE_AUTHORITATIVE: 559 + gb_timesync_authoritative(timesync_svc); 560 + break; 561 + 562 + case GB_TIMESYNC_STATE_PING: 563 + gb_timesync_ping(timesync_svc); 564 + gb_timesync_log_ping_time(timesync_svc); 565 + break; 566 + 567 + default: 568 + pr_err("Invalid state %d for delayed work\n", 569 + timesync_svc->state); 570 + break; 571 + } 572 + 573 + mutex_unlock(&timesync_svc->mutex); 574 + } 575 + 576 + /* 577 + * Schedule a new TimeSync INIT or PING operation serialized w/r to 578 + * gb_timesync_worker(). 
579 + */ 580 + static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state) 581 + { 582 + int ret = 0; 583 + 584 + if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING) 585 + return -EINVAL; 586 + 587 + mutex_lock(&timesync_svc->mutex); 588 + if (timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE || 589 + timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) { 590 + gb_timesync_set_state_atomic(timesync_svc, state); 591 + } else { 592 + ret = -ENODEV; 593 + } 594 + mutex_unlock(&timesync_svc->mutex); 595 + return ret; 596 + } 597 + 598 + static int __gb_timesync_schedule_synchronous( 599 + struct gb_timesync_svc *timesync_svc, int state) 600 + { 601 + unsigned long flags; 602 + int ret; 603 + 604 + ret = gb_timesync_schedule(timesync_svc, state); 605 + if (ret) 606 + return ret; 607 + 608 + ret = wait_event_interruptible(timesync_svc->wait_queue, 609 + (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE || 610 + timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE || 611 + timesync_svc->state == GB_TIMESYNC_STATE_INVALID)); 612 + if (ret) 613 + return ret; 614 + 615 + mutex_lock(&timesync_svc->mutex); 616 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 617 + 618 + switch (timesync_svc->state) { 619 + case GB_TIMESYNC_STATE_INVALID: 620 + case GB_TIMESYNC_STATE_INACTIVE: 621 + ret = -ENODEV; 622 + break; 623 + case GB_TIMESYNC_STATE_INIT: 624 + case GB_TIMESYNC_STATE_WAIT_SVC: 625 + case GB_TIMESYNC_STATE_AUTHORITATIVE: 626 + case GB_TIMESYNC_STATE_PING: 627 + ret = -EAGAIN; 628 + break; 629 + case GB_TIMESYNC_STATE_ACTIVE: 630 + ret = 0; 631 + break; 632 + } 633 + 634 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 635 + mutex_unlock(&timesync_svc->mutex); 636 + 637 + return ret; 638 + } 639 + 640 + static struct gb_timesync_svc *gb_timesync_find_timesync_svc( 641 + struct gb_host_device *hd) 642 + { 643 + struct gb_timesync_svc *timesync_svc; 644 + 645 + list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) { 646 + 
if (timesync_svc->svc == hd->svc) 647 + return timesync_svc; 648 + } 649 + return NULL; 650 + } 651 + 652 + static struct gb_timesync_interface *gb_timesync_find_timesync_interface( 653 + struct gb_timesync_svc *timesync_svc, 654 + struct gb_interface *interface) 655 + { 656 + struct gb_timesync_interface *timesync_interface; 657 + 658 + list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) { 659 + if (timesync_interface->interface == interface) 660 + return timesync_interface; 661 + } 662 + return NULL; 663 + } 664 + 665 + int gb_timesync_schedule_synchronous(struct gb_interface *interface) 666 + { 667 + int ret; 668 + struct gb_timesync_svc *timesync_svc; 669 + 670 + if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC)) 671 + return 0; 672 + 673 + mutex_lock(&gb_timesync_svc_list_mutex); 674 + timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 675 + if (!timesync_svc) { 676 + ret = -ENODEV; 677 + goto done; 678 + } 679 + 680 + ret = __gb_timesync_schedule_synchronous(timesync_svc, 681 + GB_TIMESYNC_STATE_INIT); 682 + done: 683 + mutex_unlock(&gb_timesync_svc_list_mutex); 684 + return ret; 685 + } 686 + EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous); 687 + 688 + void gb_timesync_schedule_asynchronous(struct gb_interface *interface) 689 + { 690 + struct gb_timesync_svc *timesync_svc; 691 + 692 + if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC)) 693 + return; 694 + 695 + mutex_lock(&gb_timesync_svc_list_mutex); 696 + timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 697 + if (!timesync_svc) 698 + goto done; 699 + 700 + gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT); 701 + done: 702 + mutex_unlock(&gb_timesync_svc_list_mutex); 703 + return; 704 + } 705 + EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous); 706 + 707 + static ssize_t gb_timesync_ping_read(struct file *file, char __user *buf, 708 + size_t len, loff_t *offset) 709 + { 710 + struct gb_timesync_svc *timesync_svc = 
file->f_inode->i_private; 711 + char *pbuf; 712 + ssize_t ret = 0; 713 + 714 + mutex_lock(&gb_timesync_svc_list_mutex); 715 + mutex_lock(&timesync_svc->mutex); 716 + if (list_empty(&timesync_svc->interface_list)) 717 + ret = -ENODEV; 718 + timesync_svc->print_ping = false; 719 + mutex_unlock(&timesync_svc->mutex); 720 + if (ret) 721 + goto done; 722 + 723 + ret = __gb_timesync_schedule_synchronous(timesync_svc, 724 + GB_TIMESYNC_STATE_PING); 725 + if (ret) 726 + goto done; 727 + 728 + pbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); 729 + if (!pbuf) { 730 + ret = -ENOMEM; 731 + goto done; 732 + } 733 + 734 + ret = gb_timesync_log_frame_time(timesync_svc, pbuf, PAGE_SIZE); 735 + if (ret > 0) 736 + ret = simple_read_from_buffer(buf, len, offset, pbuf, ret); 737 + kfree(pbuf); 738 + done: 739 + mutex_unlock(&gb_timesync_svc_list_mutex); 740 + return ret; 741 + } 742 + 743 + static const struct file_operations gb_timesync_debugfs_ops = { 744 + .read = gb_timesync_ping_read, 745 + }; 746 + 747 + static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc, 748 + struct gb_host_device *hd) 749 + { 750 + struct gb_timesync_host_device *timesync_hd; 751 + 752 + timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL); 753 + if (!timesync_hd) 754 + return -ENOMEM; 755 + 756 + WARN_ON(timesync_svc->timesync_hd); 757 + timesync_hd->hd = hd; 758 + timesync_svc->timesync_hd = timesync_hd; 759 + 760 + return 0; 761 + } 762 + 763 + static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc, 764 + struct gb_host_device *hd) 765 + { 766 + if (timesync_svc->timesync_hd->hd == hd) { 767 + kfree(timesync_svc->timesync_hd); 768 + timesync_svc->timesync_hd = NULL; 769 + return; 770 + } 771 + WARN_ON(1); 772 + } 773 + 774 + int gb_timesync_svc_add(struct gb_svc *svc) 775 + { 776 + struct gb_timesync_svc *timesync_svc; 777 + int ret; 778 + 779 + timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL); 780 + if (!timesync_svc) 781 + return -ENOMEM; 782 + 783 + 
timesync_svc->work_queue = 784 + create_singlethread_workqueue("gb-timesync-work_queue"); 785 + 786 + if (!timesync_svc->work_queue) { 787 + kfree(timesync_svc); 788 + return -ENOMEM; 789 + } 790 + 791 + mutex_lock(&gb_timesync_svc_list_mutex); 792 + INIT_LIST_HEAD(&timesync_svc->interface_list); 793 + INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker); 794 + mutex_init(&timesync_svc->mutex); 795 + spin_lock_init(&timesync_svc->spinlock); 796 + init_waitqueue_head(&timesync_svc->wait_queue); 797 + 798 + timesync_svc->svc = svc; 799 + timesync_svc->frame_time_offset = 0; 800 + timesync_svc->capture_ping = false; 801 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE); 802 + timesync_svc->frame_time_dentry = 803 + debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry, 804 + timesync_svc, &gb_timesync_debugfs_ops); 805 + list_add(&timesync_svc->list, &gb_timesync_svc_list); 806 + ret = gb_timesync_hd_add(timesync_svc, svc->hd); 807 + if (ret) { 808 + list_del(&timesync_svc->list); 809 + debugfs_remove(timesync_svc->frame_time_dentry); 810 + destroy_workqueue(timesync_svc->work_queue); 811 + kfree(timesync_svc); 812 + } 813 + mutex_unlock(&gb_timesync_svc_list_mutex); 814 + return ret; 815 + } 816 + EXPORT_SYMBOL_GPL(gb_timesync_svc_add); 817 + 818 + void gb_timesync_svc_remove(struct gb_svc *svc) 819 + { 820 + struct gb_timesync_svc *timesync_svc; 821 + struct gb_timesync_interface *timesync_interface; 822 + struct gb_timesync_interface *next; 823 + 824 + mutex_lock(&gb_timesync_svc_list_mutex); 825 + timesync_svc = gb_timesync_find_timesync_svc(svc->hd); 826 + if (!timesync_svc) 827 + goto done; 828 + 829 + mutex_lock(&timesync_svc->mutex); 830 + 831 + gb_timesync_teardown(timesync_svc); 832 + 833 + gb_timesync_hd_remove(timesync_svc, svc->hd); 834 + list_for_each_entry_safe(timesync_interface, next, 835 + &timesync_svc->interface_list, list) { 836 + list_del(&timesync_interface->list); 837 + kfree(timesync_interface); 
838 + } 839 + gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID); 840 + debugfs_remove(timesync_svc->frame_time_dentry); 841 + cancel_delayed_work_sync(&timesync_svc->delayed_work); 842 + destroy_workqueue(timesync_svc->work_queue); 843 + list_del(&timesync_svc->list); 844 + 845 + mutex_unlock(&timesync_svc->mutex); 846 + 847 + kfree(timesync_svc); 848 + done: 849 + mutex_unlock(&gb_timesync_svc_list_mutex); 850 + } 851 + EXPORT_SYMBOL_GPL(gb_timesync_svc_remove); 852 + 853 + /* 854 + * Add a Greybus Interface to the set of TimeSync Interfaces. 855 + */ 856 + int gb_timesync_interface_add(struct gb_interface *interface) 857 + { 858 + struct gb_timesync_svc *timesync_svc; 859 + struct gb_timesync_interface *timesync_interface; 860 + int ret = 0; 861 + 862 + if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC)) 863 + return 0; 864 + 865 + mutex_lock(&gb_timesync_svc_list_mutex); 866 + timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 867 + if (!timesync_svc) { 868 + ret = -ENODEV; 869 + goto done; 870 + } 871 + 872 + timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL); 873 + if (!timesync_interface) { 874 + ret = -ENOMEM; 875 + goto done; 876 + } 877 + 878 + mutex_lock(&timesync_svc->mutex); 879 + timesync_interface->interface = interface; 880 + list_add(&timesync_interface->list, &timesync_svc->interface_list); 881 + timesync_svc->strobe_mask |= 1 << interface->interface_id; 882 + mutex_unlock(&timesync_svc->mutex); 883 + 884 + done: 885 + mutex_unlock(&gb_timesync_svc_list_mutex); 886 + return ret; 887 + } 888 + EXPORT_SYMBOL_GPL(gb_timesync_interface_add); 889 + 890 + /* 891 + * Remove a Greybus Interface from the set of TimeSync Interfaces. 
892 + */ 893 + void gb_timesync_interface_remove(struct gb_interface *interface) 894 + { 895 + struct gb_timesync_svc *timesync_svc; 896 + struct gb_timesync_interface *timesync_interface; 897 + 898 + if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC)) 899 + return; 900 + 901 + mutex_lock(&gb_timesync_svc_list_mutex); 902 + timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 903 + if (!timesync_svc) 904 + goto done; 905 + 906 + timesync_interface = gb_timesync_find_timesync_interface(timesync_svc, 907 + interface); 908 + if (!timesync_interface) 909 + goto done; 910 + 911 + mutex_lock(&timesync_svc->mutex); 912 + timesync_svc->strobe_mask &= ~(1 << interface->interface_id); 913 + list_del(&timesync_interface->list); 914 + kfree(timesync_interface); 915 + mutex_unlock(&timesync_svc->mutex); 916 + done: 917 + mutex_unlock(&gb_timesync_svc_list_mutex); 918 + } 919 + EXPORT_SYMBOL_GPL(gb_timesync_interface_remove); 920 + 921 + /* 922 + * Give the authoritative FrameTime to the calling function. Returns zero if we 923 + * are not in GB_TIMESYNC_STATE_ACTIVE. 
924 + */ 925 + static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc) 926 + { 927 + unsigned long flags; 928 + u64 ret; 929 + 930 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 931 + if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) 932 + ret = __gb_timesync_get_frame_time(timesync_svc); 933 + else 934 + ret = 0; 935 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 936 + return ret; 937 + } 938 + 939 + u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface) 940 + { 941 + struct gb_timesync_svc *timesync_svc; 942 + u64 ret = 0; 943 + 944 + mutex_lock(&gb_timesync_svc_list_mutex); 945 + timesync_svc = gb_timesync_find_timesync_svc(interface->hd); 946 + if (!timesync_svc) 947 + goto done; 948 + 949 + ret = gb_timesync_get_frame_time(timesync_svc); 950 + done: 951 + mutex_unlock(&gb_timesync_svc_list_mutex); 952 + return ret; 953 + } 954 + EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface); 955 + 956 + u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc) 957 + { 958 + struct gb_timesync_svc *timesync_svc; 959 + u64 ret = 0; 960 + 961 + mutex_lock(&gb_timesync_svc_list_mutex); 962 + timesync_svc = gb_timesync_find_timesync_svc(svc->hd); 963 + if (!timesync_svc) 964 + goto done; 965 + 966 + ret = gb_timesync_get_frame_time(timesync_svc); 967 + done: 968 + mutex_unlock(&gb_timesync_svc_list_mutex); 969 + return ret; 970 + } 971 + EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc); 972 + 973 + void gb_timesync_irq(struct gb_timesync_svc *timesync_svc) 974 + { 975 + unsigned long flags; 976 + u64 strobe_time; 977 + 978 + strobe_time = __gb_timesync_get_frame_time(timesync_svc); 979 + 980 + spin_lock_irqsave(&timesync_svc->spinlock, flags); 981 + 982 + if (timesync_svc->state == GB_TIMESYNC_STATE_PING) { 983 + if (timesync_svc->capture_ping) 984 + timesync_svc->ap_ping_frame_time = strobe_time; 985 + goto done; 986 + } else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) { 987 + goto done; 988 + 
} 989 + 990 + timesync_svc->strobe_time[timesync_svc->strobe] = strobe_time; 991 + 992 + if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) { 993 + gb_timesync_set_state(timesync_svc, 994 + GB_TIMESYNC_STATE_AUTHORITATIVE); 995 + } 996 + done: 997 + spin_unlock_irqrestore(&timesync_svc->spinlock, flags); 998 + } 999 + EXPORT_SYMBOL(gb_timesync_irq); 1000 + 1001 + int __init gb_timesync_init(void) 1002 + { 1003 + int ret = 0; 1004 + 1005 + ret = gb_timesync_platform_init(); 1006 + if (ret) { 1007 + pr_err("timesync platform init fail!\n"); 1008 + return ret; 1009 + } 1010 + 1011 + gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate(); 1012 + gb_timesync_ns_per_clock = NSEC_PER_SEC / gb_timesync_clock_rate; 1013 + 1014 + pr_info("Time-Sync timer frequency %lu Hz\n", gb_timesync_clock_rate); 1015 + return 0; 1016 + } 1017 + 1018 + void gb_timesync_exit(void) 1019 + { 1020 + gb_timesync_platform_exit(); 1021 + }
+41
drivers/staging/greybus/timesync.h
··· 1 + /* 2 + * TimeSync API driver. 3 + * 4 + * Copyright 2016 Google Inc. 5 + * Copyright 2016 Linaro Ltd. 6 + * 7 + * Released under the GPLv2 only. 8 + */ 9 + 10 + #ifndef __TIMESYNC_H 11 + #define __TIMESYNC_H 12 + 13 + struct gb_svc; 14 + struct gb_interface; 15 + struct gb_timesync_svc; 16 + 17 + /* Platform */ 18 + u64 gb_timesync_platform_get_counter(void); 19 + u32 gb_timesync_platform_get_clock_rate(void); 20 + int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata); 21 + void gb_timesync_platform_unlock_bus(void); 22 + 23 + int gb_timesync_platform_init(void); 24 + void gb_timesync_platform_exit(void); 25 + 26 + /* Core API */ 27 + int gb_timesync_interface_add(struct gb_interface *interface); 28 + void gb_timesync_interface_remove(struct gb_interface *interface); 29 + int gb_timesync_svc_add(struct gb_svc *svc); 30 + void gb_timesync_svc_remove(struct gb_svc *svc); 31 + 32 + u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface); 33 + u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc); 34 + 35 + int gb_timesync_schedule_synchronous(struct gb_interface *intf); 36 + void gb_timesync_schedule_asynchronous(struct gb_interface *intf); 37 + void gb_timesync_irq(struct gb_timesync_svc *timesync_svc); 38 + int gb_timesync_init(void); 39 + void gb_timesync_exit(void); 40 + 41 + #endif /* __TIMESYNC_H */
+77
drivers/staging/greybus/timesync_platform.c
··· 1 + /* 2 + * TimeSync API driver. 3 + * 4 + * Copyright 2016 Google Inc. 5 + * Copyright 2016 Linaro Ltd. 6 + * 7 + * Released under the GPLv2 only. 8 + * 9 + * This code reads directly from an ARMv7 memory-mapped timer that lives in 10 + * MMIO space. Since this counter lives inside of MMIO space its shared between 11 + * cores and that means we don't have to worry about issues like TSC on x86 12 + * where each time-stamp-counter (TSC) is local to a particular core. 13 + * 14 + * Register-level access code is based on 15 + * drivers/clocksource/arm_arch_timer.c 16 + */ 17 + #include <linux/cpufreq.h> 18 + #include <linux/of_platform.h> 19 + 20 + #include "greybus.h" 21 + #include "arche_platform.h" 22 + 23 + static u32 gb_timesync_clock_frequency; 24 + int (*arche_platform_change_state_cb)(enum arche_platform_state state, 25 + struct gb_timesync_svc *pdata); 26 + EXPORT_SYMBOL_GPL(arche_platform_change_state_cb); 27 + 28 + u64 gb_timesync_platform_get_counter(void) 29 + { 30 + return (u64)get_cycles(); 31 + } 32 + 33 + u32 gb_timesync_platform_get_clock_rate(void) 34 + { 35 + if (unlikely(!gb_timesync_clock_frequency)) 36 + return cpufreq_get(0); 37 + 38 + return gb_timesync_clock_frequency; 39 + } 40 + 41 + int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) 42 + { 43 + return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, 44 + pdata); 45 + } 46 + 47 + void gb_timesync_platform_unlock_bus(void) 48 + { 49 + arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); 50 + } 51 + 52 + static const struct of_device_id arch_timer_of_match[] = { 53 + { .compatible = "google,greybus-frame-time-counter", }, 54 + {}, 55 + }; 56 + 57 + int __init gb_timesync_platform_init(void) 58 + { 59 + struct device_node *np; 60 + 61 + np = of_find_matching_node(NULL, arch_timer_of_match); 62 + if (!np) { 63 + /* Tolerate not finding to allow BBB etc to continue */ 64 + pr_warn("Unable to find a compatible ARMv7 timer\n"); 65 + return 0; 66 + } 
67 + 68 + if (of_property_read_u32(np, "clock-frequency", 69 + &gb_timesync_clock_frequency)) { 70 + pr_err("Unable to find timer clock-frequency\n"); 71 + return -ENODEV; 72 + } 73 + 74 + return 0; 75 + } 76 + 77 + void gb_timesync_platform_exit(void) {}