Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch_timer: Move to generic sched_clock framework

Register with the generic sched_clock framework now that it
supports 64 bits. This fixes two problems with the current
sched_clock support for machines using the architected timers.
First off, we don't subtract the start value from subsequent
sched_clock calls so we can potentially start off with
sched_clock returning gigantic numbers. Second, there is no
support for suspend/resume handling so problems such as discussed
in 6a4dae5 (ARM: 7565/1: sched: stop sched_clock() during
suspend, 2012-10-23) can happen without this patch. Finally, it
allows us to move the sched_clock setup into drivers/clocksource,
out of the arch ports.

Cc: Christopher Covington <cov@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>

authored by

Stephen Boyd and committed by
John Stultz
65cd4f6c 07783397

+11 -24
-14
arch/arm/kernel/arch_timer.c
···
 11  11   #include <linux/init.h>
 12  12   #include <linux/types.h>
 13  13   #include <linux/errno.h>
 14   -   #include <linux/sched_clock.h>
 15  14
 16  15   #include <asm/delay.h>
 17  16
···
 19  20   static unsigned long arch_timer_read_counter_long(void)
 20  21   {
 21  22   	return arch_timer_read_counter();
 22   -   }
 23   -
 24   -   static u32 sched_clock_mult __read_mostly;
 25   -
 26   -   static unsigned long long notrace arch_timer_sched_clock(void)
 27   -   {
 28   -   	return arch_timer_read_counter() * sched_clock_mult;
 29  23   }
 30  24
 31  25   static struct delay_timer arch_delay_timer;
···
 39  47   	return -ENXIO;
 40  48
 41  49   	arch_timer_delay_timer_register();
 42   -
 43   -   	/* Cache the sched_clock multiplier to save a divide in the hot path. */
 44   -   	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
 45   -   	sched_clock_func = arch_timer_sched_clock;
 46   -   	pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
 47   -   		arch_timer_rate / 1000, sched_clock_mult);
 48  50
 49  51   	return 0;
 50  52   }
+1
arch/arm64/Kconfig
···
 14  14   select GENERIC_IOMAP
 15  15   select GENERIC_IRQ_PROBE
 16  16   select GENERIC_IRQ_SHOW
     17 + select GENERIC_SCHED_CLOCK
 17  18   select GENERIC_SMP_IDLE_THREAD
 18  19   select GENERIC_TIME_VSYSCALL
 19  20   select HARDIRQS_SW_RESEND
-10
arch/arm64/kernel/time.c
···
 61  61   EXPORT_SYMBOL(profile_pc);
 62  62   #endif
 63  63
 64   -   static u64 sched_clock_mult __read_mostly;
 65   -
 66   -   unsigned long long notrace sched_clock(void)
 67   -   {
 68   -   	return arch_timer_read_counter() * sched_clock_mult;
 69   -   }
 70   -
 71  64   void __init time_init(void)
 72  65   {
 73  66   	u32 arch_timer_rate;
···
 70  77   	arch_timer_rate = arch_timer_get_rate();
 71  78   	if (!arch_timer_rate)
 72  79   		panic("Unable to initialise architected timer.\n");
 73   -
 74   -   	/* Cache the sched_clock multiplier to save a divide in the hot path. */
 75   -   	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
 76  80
 77  81   	/* Calibrate the delay loop directly */
 78  82   	lpj_fine = arch_timer_rate / HZ;
+10
drivers/clocksource/arm_arch_timer.c
···
  19  19   #include <linux/of_address.h>
  20  20   #include <linux/io.h>
  21  21   #include <linux/slab.h>
      22 + #include <linux/sched_clock.h>
  22  23
  23  24   #include <asm/arch_timer.h>
  24  25   #include <asm/virt.h>
···
 471 470   		err = -ENOMEM;
 472 471   		goto out;
 473 472   	}
     473 +
     474 + 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
     475 + 	cyclecounter.mult = clocksource_counter.mult;
     476 + 	cyclecounter.shift = clocksource_counter.shift;
     477 + 	timecounter_init(&timecounter, &cyclecounter,
     478 + 			 arch_counter_get_cntvct());
     479 +
     480 + 	/* 56 bits minimum, so we assume worst case rollover */
     481 + 	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
 474 482
 475 483   	if (arch_timer_use_virtual) {
 476 484   		ppi = arch_timer_ppi[VIRT_PPI];