Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: fix __ndelay etc to work better

The current implementations of __ndelay and __udelay call a hypervisor
service to delay, but the hypervisor service isn't actually implemented
very well, and the consensus is that Linux should handle figuring this
out natively and not use a hypervisor service.

By converting nanoseconds to cycles, and then spinning until the
cycle counter reaches the desired cycle, we get several benefits:
first, we are sensitive to the actual clock speed; second, we use
less power by issuing a slow SPR read once every six cycles while
we delay; and third, we properly handle the case of an interrupt by
exiting at the target time rather than after some number of cycles.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+34 -11
+3
arch/tile/include/asm/timex.h
···
 38  38
 39  39 	cycles_t get_clock_rate(void);
 40  40
     41 +	/* Convert nanoseconds to core clock cycles. */
     42 +	cycles_t ns2cycles(unsigned long nsecs);
     43 +
 41  44 	/* Called at cpu initialization to set some low-level constants. */
 42  45 	void setup_clock(void);
 43  46
+5
arch/tile/include/hv/hypervisor.h
···
 964  964
 965  965 /** Waits for at least the specified number of nanoseconds then returns.
 966  966  *
      967 + * NOTE: this deprecated function currently assumes a 750 MHz clock,
      968 + * and is thus not generally suitable for use. New code should call
      969 + * hv_sysconf(HV_SYSCONF_CPU_SPEED), compute a cycle count to wait for,
      970 + * and delay by looping while checking the cycle counter SPR.
      971 + *
 967  972  * @param nanosecs The number of nanoseconds to sleep.
 968  973  */
 969  974 void hv_nanosleep(int nanosecs);
-6
arch/tile/kernel/entry.S
···
 38  38 	jrp lr
 39  39 	STD_ENDPROC(kernel_execve)
 40  40
 41     -	/* Delay a fixed number of cycles. */
 42     -	STD_ENTRY(__delay)
 43     -	{ addi r0, r0, -1; bnzt r0, . }
 44     -	jrp lr
 45     -	STD_ENDPROC(__delay)
 46     -
 47  41 	/*
 48  42 	 * We don't run this function directly, but instead copy it to a page
 49  43 	 * we map into every user process. See vdso_setup().
+10
arch/tile/kernel/time.c
···
 224  224 {
 225  225 	return -EINVAL;
 226  226 }
      227 +
      228 +/*
      229 + * Use the tile timer to convert nsecs to core clock cycles, relying
      230 + * on it having the same frequency as SPR_CYCLE.
      231 + */
      232 +cycles_t ns2cycles(unsigned long nsecs)
      233 +{
      234 +	struct clock_event_device *dev = &__get_cpu_var(tile_timer);
      235 +	return ((u64)nsecs * dev->mult) >> dev->shift;
      236 +}
+16 -5
arch/tile/lib/delay.c
···
 15  15 #include <linux/module.h>
 16  16 #include <linux/delay.h>
 17  17 #include <linux/thread_info.h>
 18     -#include <asm/fixmap.h>
 19     -#include <hv/hypervisor.h>
     18 +#include <asm/timex.h>
 20  19
 21  20 void __udelay(unsigned long usecs)
 22  21 {
 23     -	hv_nanosleep(usecs * 1000);
     22 +	if (usecs > ULONG_MAX / 1000) {
     23 +		WARN_ON_ONCE(usecs > ULONG_MAX / 1000);
     24 +		usecs = ULONG_MAX / 1000;
     25 +	}
     26 +	__ndelay(usecs * 1000);
 24  27 }
 25  28 EXPORT_SYMBOL(__udelay);
 26  29
 27  30 void __ndelay(unsigned long nsecs)
 28  31 {
 29     -	hv_nanosleep(nsecs);
     32 +	cycles_t target = get_cycles();
     33 +	target += ns2cycles(nsecs);
     34 +	while (get_cycles() < target)
     35 +		cpu_relax();
 30  36 }
 31  37 EXPORT_SYMBOL(__ndelay);
 32  38
 33     -/* FIXME: should be declared in a header somewhere. */
     39 +void __delay(unsigned long cycles)
     40 +{
     41 +	cycles_t target = get_cycles() + cycles;
     42 +	while (get_cycles() < target)
     43 +		cpu_relax();
     44 +}
 34  45 EXPORT_SYMBOL(__delay);