Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
"The timer department delivers:

- more year 2038 rework

- a massive rework of the arm architected timer

- preparatory patches to allow NTP correction of clock event devices
to avoid early expiry

- the usual pile of fixes and enhancements all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (91 commits)
timer/sysclt: Restrict timer migration sysctl values to 0 and 1
arm64/arch_timer: Mark errata handlers as __maybe_unused
Clocksource/mips-gic: Remove redundant non devicetree init
MIPS/Malta: Probe gic-timer via devicetree
clocksource: Use GENMASK_ULL in definition of CLOCKSOURCE_MASK
acpi/arm64: Add SBSA Generic Watchdog support in GTDT driver
clocksource: arm_arch_timer: add GTDT support for memory-mapped timer
acpi/arm64: Add memory-mapped timer support in GTDT driver
clocksource: arm_arch_timer: simplify ACPI support code.
acpi/arm64: Add GTDT table parse driver
clocksource: arm_arch_timer: split MMIO timer probing.
clocksource: arm_arch_timer: add structs to describe MMIO timer
clocksource: arm_arch_timer: move arch_timer_needs_of_probing into DT init call
clocksource: arm_arch_timer: refactor arch_timer_needs_probing
clocksource: arm_arch_timer: split dt-only rate handling
x86/uv/time: Set ->min_delta_ticks and ->max_delta_ticks
unicore32/time: Set ->min_delta_ticks and ->max_delta_ticks
um/time: Set ->min_delta_ticks and ->max_delta_ticks
tile/time: Set ->min_delta_ticks and ->max_delta_ticks
score/time: Set ->min_delta_ticks and ->max_delta_ticks
...

+2125 -900
+1
Documentation/arm64/silicon-errata.txt
··· 54 54 | ARM | Cortex-A57 | #852523 | N/A | 55 55 | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 | 56 56 | ARM | Cortex-A72 | #853709 | N/A | 57 + | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | 57 58 | ARM | MMU-500 | #841119,#826419 | N/A | 58 59 | | | | | 59 60 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
-22
Documentation/devicetree/bindings/timer/cortina,gemini-timer.txt
··· 1 - Cortina Systems Gemini timer 2 - 3 - This timer is embedded in the Cortina Systems Gemini SoCs. 4 - 5 - Required properties: 6 - 7 - - compatible : Must be "cortina,gemini-timer" 8 - - reg : Should contain registers location and length 9 - - interrupts : Should contain the three timer interrupts with 10 - flags for rising edge 11 - - syscon : a phandle to the global Gemini system controller 12 - 13 - Example: 14 - 15 - timer@43000000 { 16 - compatible = "cortina,gemini-timer"; 17 - reg = <0x43000000 0x1000>; 18 - interrupts = <14 IRQ_TYPE_EDGE_RISING>, /* Timer 1 */ 19 - <15 IRQ_TYPE_EDGE_RISING>, /* Timer 2 */ 20 - <16 IRQ_TYPE_EDGE_RISING>; /* Timer 3 */ 21 - syscon = <&syscon>; 22 - };
+33
Documentation/devicetree/bindings/timer/faraday,fttmr010.txt
··· 1 + Faraday Technology timer 2 + 3 + This timer is a generic IP block from Faraday Technology, embedded in the 4 + Cortina Systems Gemini SoCs and other designs. 5 + 6 + Required properties: 7 + 8 + - compatible : Must be one of 9 + "faraday,fttmr010" 10 + "cortina,gemini-timer" 11 + - reg : Should contain registers location and length 12 + - interrupts : Should contain the three timer interrupts usually with 13 + flags for falling edge 14 + 15 + Optionally required properties: 16 + 17 + - clocks : a clock to provide the tick rate for "faraday,fttmr010" 18 + - clock-names : should be "EXTCLK" and "PCLK" for the external tick timer 19 + and peripheral clock respectively, for "faraday,fttmr010" 20 + - syscon : a phandle to the global Gemini system controller if the compatible 21 + type is "cortina,gemini-timer" 22 + 23 + Example: 24 + 25 + timer@43000000 { 26 + compatible = "faraday,fttmr010"; 27 + reg = <0x43000000 0x1000>; 28 + interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */ 29 + <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */ 30 + <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */ 31 + clocks = <&extclk>, <&pclk>; 32 + clock-names = "EXTCLK", "PCLK"; 33 + };
+9 -3
Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
··· 1 1 Rockchip rk timer 2 2 3 3 Required properties: 4 - - compatible: shall be one of: 5 - "rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368 6 - "rockchip,rk3399-timer" - for rk3399 4 + - compatible: should be: 5 + "rockchip,rk3036-timer", "rockchip,rk3288-timer": for Rockchip RK3036 6 + "rockchip,rk3066-timer", "rockchip,rk3288-timer": for Rockchip RK3066 7 + "rockchip,rk3188-timer", "rockchip,rk3288-timer": for Rockchip RK3188 8 + "rockchip,rk3228-timer", "rockchip,rk3288-timer": for Rockchip RK3228 9 + "rockchip,rk3229-timer", "rockchip,rk3288-timer": for Rockchip RK3229 10 + "rockchip,rk3288-timer": for Rockchip RK3288 11 + "rockchip,rk3368-timer", "rockchip,rk3288-timer": for Rockchip RK3368 12 + "rockchip,rk3399-timer": for Rockchip RK3399 7 13 - reg: base address of the timer register starting with TIMERS CONTROL register 8 14 - interrupts: should contain the interrupts for Timer0 9 15 - clocks : must contain an entry for each entry in clock-names
+1
MAINTAINERS
··· 11119 11119 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER 11120 11120 M: John Stultz <john.stultz@linaro.org> 11121 11121 M: Thomas Gleixner <tglx@linutronix.de> 11122 + R: Stephen Boyd <sboyd@codeaurora.org> 11122 11123 L: linux-kernel@vger.kernel.org 11123 11124 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core 11124 11125 S: Supported
+3 -1
arch/alpha/kernel/osf_sys.c
··· 1016 1016 SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, 1017 1017 struct timezone __user *, tz) 1018 1018 { 1019 + struct timespec64 kts64; 1019 1020 struct timespec kts; 1020 1021 struct timezone ktz; 1021 1022 ··· 1024 1023 if (get_tv32((struct timeval *)&kts, tv)) 1025 1024 return -EFAULT; 1026 1025 kts.tv_nsec *= 1000; 1026 + kts64 = timespec_to_timespec64(kts); 1027 1027 } 1028 1028 if (tz) { 1029 1029 if (copy_from_user(&ktz, tz, sizeof(*tz))) 1030 1030 return -EFAULT; 1031 1031 } 1032 1032 1033 - return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); 1033 + return do_sys_settimeofday64(tv ? &kts64 : NULL, tz ? &ktz : NULL); 1034 1034 } 1035 1035 1036 1036 asmlinkage long sys_ni_posix_timers(void);
+17
arch/arm/boot/dts/rk3188.dtsi
··· 106 106 }; 107 107 }; 108 108 109 + timer3: timer@2000e000 { 110 + compatible = "rockchip,rk3188-timer", "rockchip,rk3288-timer"; 111 + reg = <0x2000e000 0x20>; 112 + interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; 113 + clocks = <&cru SCLK_TIMER3>, <&cru PCLK_TIMER3>; 114 + clock-names = "timer", "pclk"; 115 + }; 116 + 117 + timer6: timer@200380a0 { 118 + compatible = "rockchip,rk3188-timer", "rockchip,rk3288-timer"; 119 + reg = <0x200380a0 0x20>; 120 + interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>; 121 + clocks = <&cru SCLK_TIMER6>, <&cru PCLK_TIMER0>; 122 + clock-names = "timer", "pclk"; 123 + }; 124 + 109 125 i2s0: i2s@1011a000 { 110 126 compatible = "rockchip,rk3188-i2s", "rockchip,rk3066-i2s"; 111 127 reg = <0x1011a000 0x2000>; ··· 546 530 547 531 &global_timer { 548 532 interrupts = <GIC_PPI 11 0xf04>; 533 + status = "disabled"; 549 534 }; 550 535 551 536 &local_timer {
+1 -1
arch/arm/boot/dts/rk322x.dtsi
··· 325 325 }; 326 326 327 327 timer: timer@110c0000 { 328 - compatible = "rockchip,rk3288-timer"; 328 + compatible = "rockchip,rk3228-timer", "rockchip,rk3288-timer"; 329 329 reg = <0x110c0000 0x20>; 330 330 interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>; 331 331 clocks = <&xin24m>, <&cru PCLK_TIMER>;
+1
arch/arm64/Kconfig
··· 2 2 def_bool y 3 3 select ACPI_CCA_REQUIRED if ACPI 4 4 select ACPI_GENERIC_GSI if ACPI 5 + select ACPI_GTDT if ACPI 5 6 select ACPI_REDUCED_HARDWARE_ONLY if ACPI 6 7 select ACPI_MCFG if ACPI 7 8 select ACPI_SPCR_TABLE if ACPI
+31 -10
arch/arm64/include/asm/arch_timer.h
··· 25 25 #include <linux/bug.h> 26 26 #include <linux/init.h> 27 27 #include <linux/jump_label.h> 28 + #include <linux/smp.h> 28 29 #include <linux/types.h> 29 30 30 31 #include <clocksource/arm_arch_timer.h> ··· 38 37 #define needs_unstable_timer_counter_workaround() false 39 38 #endif 40 39 40 + enum arch_timer_erratum_match_type { 41 + ate_match_dt, 42 + ate_match_local_cap_id, 43 + ate_match_acpi_oem_info, 44 + }; 45 + 46 + struct clock_event_device; 41 47 42 48 struct arch_timer_erratum_workaround { 43 - const char *id; /* Indicate the Erratum ID */ 49 + enum arch_timer_erratum_match_type match_type; 50 + const void *id; 51 + const char *desc; 44 52 u32 (*read_cntp_tval_el0)(void); 45 53 u32 (*read_cntv_tval_el0)(void); 46 54 u64 (*read_cntvct_el0)(void); 55 + int (*set_next_event_phys)(unsigned long, struct clock_event_device *); 56 + int (*set_next_event_virt)(unsigned long, struct clock_event_device *); 47 57 }; 48 58 49 - extern const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround; 59 + DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, 60 + timer_unstable_counter_workaround); 50 61 51 - #define arch_timer_reg_read_stable(reg) \ 52 - ({ \ 53 - u64 _val; \ 54 - if (needs_unstable_timer_counter_workaround()) \ 55 - _val = timer_unstable_counter_workaround->read_##reg();\ 56 - else \ 57 - _val = read_sysreg(reg); \ 58 - _val; \ 62 + #define arch_timer_reg_read_stable(reg) \ 63 + ({ \ 64 + u64 _val; \ 65 + if (needs_unstable_timer_counter_workaround()) { \ 66 + const struct arch_timer_erratum_workaround *wa; \ 67 + preempt_disable(); \ 68 + wa = __this_cpu_read(timer_unstable_counter_workaround); \ 69 + if (wa && wa->read_##reg) \ 70 + _val = wa->read_##reg(); \ 71 + else \ 72 + _val = read_sysreg(reg); \ 73 + preempt_enable(); \ 74 + } else { \ 75 + _val = read_sysreg(reg); \ 76 + } \ 77 + _val; \ 59 78 }) 60 79 61 80 /*
+2 -1
arch/arm64/include/asm/cpucaps.h
··· 37 37 #define ARM64_HAS_NO_FPSIMD 16 38 38 #define ARM64_WORKAROUND_REPEAT_TLBI 17 39 39 #define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18 40 + #define ARM64_WORKAROUND_858921 19 40 41 41 - #define ARM64_NCAPS 19 42 + #define ARM64_NCAPS 20 42 43 43 44 #endif /* __ASM_CPUCAPS_H */
+2
arch/arm64/include/asm/cputype.h
··· 80 80 #define ARM_CPU_PART_FOUNDATION 0xD00 81 81 #define ARM_CPU_PART_CORTEX_A57 0xD07 82 82 #define ARM_CPU_PART_CORTEX_A53 0xD03 83 + #define ARM_CPU_PART_CORTEX_A73 0xD09 83 84 84 85 #define APM_CPU_PART_POTENZA 0x000 85 86 ··· 93 92 94 93 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 95 94 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 95 + #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) 96 96 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 97 97 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 98 98 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+2
arch/arm64/include/asm/esr.h
··· 175 175 #define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \ 176 176 ESR_ELx_SYS64_ISS_DIR_READ) 177 177 178 + #define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \ 179 + ESR_ELx_SYS64_ISS_DIR_READ) 178 180 #ifndef __ASSEMBLY__ 179 181 #include <asm/types.h> 180 182
+15
arch/arm64/kernel/cpu_errata.c
··· 53 53 .midr_range_min = min, \ 54 54 .midr_range_max = max 55 55 56 + #define MIDR_ALL_VERSIONS(model) \ 57 + .def_scope = SCOPE_LOCAL_CPU, \ 58 + .matches = is_affected_midr_range, \ 59 + .midr_model = model, \ 60 + .midr_range_min = 0, \ 61 + .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK) 62 + 56 63 const struct arm64_cpu_capabilities arm64_errata[] = { 57 64 #if defined(CONFIG_ARM64_ERRATUM_826319) || \ 58 65 defined(CONFIG_ARM64_ERRATUM_827319) || \ ··· 156 149 MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 157 150 MIDR_CPU_VAR_REV(0, 0), 158 151 MIDR_CPU_VAR_REV(0, 0)), 152 + }, 153 + #endif 154 + #ifdef CONFIG_ARM64_ERRATUM_858921 155 + { 156 + /* Cortex-A73 all versions */ 157 + .desc = "ARM erratum 858921", 158 + .capability = ARM64_WORKAROUND_858921, 159 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 159 160 }, 160 161 #endif 161 162 {
+11 -2
arch/arm64/kernel/cpufeature.c
··· 1090 1090 * Check if the current CPU has a given feature capability. 1091 1091 * Should be called from non-preemptible context. 1092 1092 */ 1093 - bool this_cpu_has_cap(unsigned int cap) 1093 + static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, 1094 + unsigned int cap) 1094 1095 { 1095 1096 const struct arm64_cpu_capabilities *caps; 1096 1097 1097 1098 if (WARN_ON(preemptible())) 1098 1099 return false; 1099 1100 1100 - for (caps = arm64_features; caps->desc; caps++) 1101 + for (caps = cap_array; caps->desc; caps++) 1101 1102 if (caps->capability == cap && caps->matches) 1102 1103 return caps->matches(caps, SCOPE_LOCAL_CPU); 1103 1104 1104 1105 return false; 1106 + } 1107 + 1108 + extern const struct arm64_cpu_capabilities arm64_errata[]; 1109 + 1110 + bool this_cpu_has_cap(unsigned int cap) 1111 + { 1112 + return (__this_cpu_has_cap(arm64_features, cap) || 1113 + __this_cpu_has_cap(arm64_errata, cap)); 1105 1114 } 1106 1115 1107 1116 void __init setup_cpu_features(void)
+14
arch/arm64/kernel/traps.c
··· 505 505 regs->pc += 4; 506 506 } 507 507 508 + static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) 509 + { 510 + int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; 511 + 512 + pt_regs_write_reg(regs, rt, arch_counter_get_cntvct()); 513 + regs->pc += 4; 514 + } 515 + 508 516 struct sys64_hook { 509 517 unsigned int esr_mask; 510 518 unsigned int esr_val; ··· 530 522 .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, 531 523 .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ, 532 524 .handler = ctr_read_handler, 525 + }, 526 + { 527 + /* Trap read access to CNTVCT_EL0 */ 528 + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, 529 + .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT, 530 + .handler = cntvct_read_handler, 533 531 }, 534 532 {}, 535 533 };
+4
arch/blackfin/kernel/time-ts.c
··· 230 230 clock_tick = get_sclk(); 231 231 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); 232 232 evt->max_delta_ns = clockevent_delta2ns(-1, evt); 233 + evt->max_delta_ticks = (unsigned long)-1; 233 234 evt->min_delta_ns = clockevent_delta2ns(100, evt); 235 + evt->min_delta_ticks = 100; 234 236 235 237 evt->cpumask = cpumask_of(0); 236 238 ··· 346 344 clock_tick = get_cclk() / TIME_SCALE; 347 345 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); 348 346 evt->max_delta_ns = clockevent_delta2ns(-1, evt); 347 + evt->max_delta_ticks = (unsigned long)-1; 349 348 evt->min_delta_ns = clockevent_delta2ns(100, evt); 349 + evt->min_delta_ticks = 100; 350 350 351 351 evt->cpumask = cpumask_of(cpu); 352 352
+2
arch/c6x/platforms/timer64.c
··· 234 234 clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5); 235 235 236 236 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 237 + cd->max_delta_ticks = 0x7fffffff; 237 238 cd->min_delta_ns = clockevent_delta2ns(250, cd); 239 + cd->min_delta_ticks = 250; 238 240 239 241 cd->cpumask = cpumask_of(smp_processor_id()); 240 242
+2
arch/hexagon/kernel/time.c
··· 199 199 clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4); 200 200 201 201 ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev); 202 + ce_dev->max_delta_ticks = 0x7fffffff; 202 203 ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev); 204 + ce_dev->min_delta_ticks = 0xf; 203 205 204 206 #ifdef CONFIG_SMP 205 207 setup_percpu_clockdev();
+2
arch/m68k/coldfire/pit.c
··· 149 149 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); 150 150 cf_pit_clockevent.max_delta_ns = 151 151 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); 152 + cf_pit_clockevent.max_delta_ticks = 0xFFFF; 152 153 cf_pit_clockevent.min_delta_ns = 153 154 clockevent_delta2ns(0x3f, &cf_pit_clockevent); 155 + cf_pit_clockevent.min_delta_ticks = 0x3f; 154 156 clockevents_register_device(&cf_pit_clockevent); 155 157 156 158 setup_irq(MCF_IRQ_PIT1, &pit_irq);
+3 -1
arch/mips/alchemy/common/time.c
··· 138 138 cd->shift = 32; 139 139 cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); 140 140 cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd); 141 - cd->min_delta_ns = clockevent_delta2ns(9, cd); /* ~0.28ms */ 141 + cd->max_delta_ticks = 0xffffffff; 142 + cd->min_delta_ns = clockevent_delta2ns(9, cd); 143 + cd->min_delta_ticks = 9; /* ~0.28ms */ 142 144 clockevents_register_device(cd); 143 145 setup_irq(m2int, &au1x_rtcmatch2_irqaction); 144 146
+2
arch/mips/jz4740/time.c
··· 145 145 146 146 clockevent_set_clock(&jz4740_clockevent, clk_rate); 147 147 jz4740_clockevent.min_delta_ns = clockevent_delta2ns(100, &jz4740_clockevent); 148 + jz4740_clockevent.min_delta_ticks = 100; 148 149 jz4740_clockevent.max_delta_ns = clockevent_delta2ns(0xffff, &jz4740_clockevent); 150 + jz4740_clockevent.max_delta_ticks = 0xffff; 149 151 jz4740_clockevent.cpumask = cpumask_of(0); 150 152 151 153 clockevents_register_device(&jz4740_clockevent);
+2
arch/mips/kernel/cevt-bcm1480.c
··· 123 123 CLOCK_EVT_FEAT_ONESHOT; 124 124 clockevent_set_clock(cd, V_SCD_TIMER_FREQ); 125 125 cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); 126 + cd->max_delta_ticks = 0x7fffff; 126 127 cd->min_delta_ns = clockevent_delta2ns(2, cd); 128 + cd->min_delta_ticks = 2; 127 129 cd->rating = 200; 128 130 cd->irq = irq; 129 131 cd->cpumask = cpumask_of(cpu);
+2
arch/mips/kernel/cevt-ds1287.c
··· 128 128 cd->irq = irq; 129 129 clockevent_set_clock(cd, 32768); 130 130 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 131 + cd->max_delta_ticks = 0x7fffffff; 131 132 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 133 + cd->min_delta_ticks = 0x300; 132 134 cd->cpumask = cpumask_of(0); 133 135 134 136 clockevents_register_device(&ds1287_clockevent);
+2
arch/mips/kernel/cevt-gt641xx.c
··· 152 152 cd->rating = 200 + gt641xx_base_clock / 10000000; 153 153 clockevent_set_clock(cd, gt641xx_base_clock); 154 154 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 155 + cd->max_delta_ticks = 0x7fffffff; 155 156 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 157 + cd->min_delta_ticks = 0x300; 156 158 cd->cpumask = cpumask_of(0); 157 159 158 160 clockevents_register_device(&gt641xx_timer0_clockevent);
+2
arch/mips/kernel/cevt-sb1250.c
··· 123 123 CLOCK_EVT_FEAT_ONESHOT; 124 124 clockevent_set_clock(cd, V_SCD_TIMER_FREQ); 125 125 cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); 126 + cd->max_delta_ticks = 0x7fffff; 126 127 cd->min_delta_ns = clockevent_delta2ns(2, cd); 128 + cd->min_delta_ticks = 2; 127 129 cd->rating = 200; 128 130 cd->irq = irq; 129 131 cd->cpumask = cpumask_of(cpu);
+2
arch/mips/kernel/cevt-txx9.c
··· 196 196 clockevent_set_clock(cd, TIMER_CLK(imbusclk)); 197 197 cd->max_delta_ns = 198 198 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); 199 + cd->max_delta_ticks = 0xffffffff >> (32 - TXX9_TIMER_BITS); 199 200 cd->min_delta_ns = clockevent_delta2ns(0xf, cd); 201 + cd->min_delta_ticks = 0xf; 200 202 cd->irq = irq; 201 203 cd->cpumask = cpumask_of(0), 202 204 clockevents_register_device(cd);
+2
arch/mips/loongson32/common/time.c
··· 199 199 200 200 clockevent_set_clock(cd, mips_hpt_frequency); 201 201 cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd); 202 + cd->max_delta_ticks = 0xffffff; 202 203 cd->min_delta_ns = clockevent_delta2ns(0x000300, cd); 204 + cd->min_delta_ticks = 0x000300; 203 205 cd->cpumask = cpumask_of(smp_processor_id()); 204 206 clockevents_register_device(cd); 205 207
+2
arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
··· 123 123 cd->cpumask = cpumask_of(cpu); 124 124 clockevent_set_clock(cd, MFGPT_TICK_RATE); 125 125 cd->max_delta_ns = clockevent_delta2ns(0xffff, cd); 126 + cd->max_delta_ticks = 0xffff; 126 127 cd->min_delta_ns = clockevent_delta2ns(0xf, cd); 128 + cd->min_delta_ticks = 0xf; 127 129 128 130 /* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */ 129 131 _wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100);
+2
arch/mips/loongson64/loongson-3/hpet.c
··· 241 241 cd->cpumask = cpumask_of(cpu); 242 242 clockevent_set_clock(cd, HPET_FREQ); 243 243 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 244 + cd->max_delta_ticks = 0x7fffffff; 244 245 cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd); 246 + cd->min_delta_ticks = HPET_MIN_PROG_DELTA; 245 247 246 248 clockevents_register_device(cd); 247 249 setup_irq(HPET_T0_IRQ, &hpet_irq);
+30 -1
arch/mips/mti-malta/malta-time.c
··· 21 21 #include <linux/i8253.h> 22 22 #include <linux/init.h> 23 23 #include <linux/kernel_stat.h> 24 + #include <linux/libfdt.h> 24 25 #include <linux/math64.h> 25 26 #include <linux/sched.h> 26 27 #include <linux/spinlock.h> ··· 208 207 CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); 209 208 } 210 209 210 + #ifdef CONFIG_CLKSRC_MIPS_GIC 211 + static u32 gic_frequency_dt; 212 + 213 + static struct property gic_frequency_prop = { 214 + .name = "clock-frequency", 215 + .length = sizeof(u32), 216 + .value = &gic_frequency_dt, 217 + }; 218 + 219 + static void update_gic_frequency_dt(void) 220 + { 221 + struct device_node *node; 222 + 223 + gic_frequency_dt = cpu_to_be32(gic_frequency); 224 + 225 + node = of_find_compatible_node(NULL, NULL, "mti,gic-timer"); 226 + if (!node) { 227 + pr_err("mti,gic-timer device node not found\n"); 228 + return; 229 + } 230 + 231 + if (of_update_property(node, &gic_frequency_prop) < 0) 232 + pr_err("error updating gic frequency property\n"); 233 + } 234 + 235 + #endif 236 + 211 237 void __init plat_time_init(void) 212 238 { 213 239 unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK); ··· 264 236 printk("GIC frequency %d.%02d MHz\n", freq/1000000, 265 237 (freq%1000000)*100/1000000); 266 238 #ifdef CONFIG_CLKSRC_MIPS_GIC 267 - gic_clocksource_init(gic_frequency); 239 + update_gic_frequency_dt(); 240 + clocksource_probe(); 268 241 #endif 269 242 } 270 243 #endif
+2
arch/mips/ralink/cevt-rt3352.c
··· 129 129 systick.dev.name = np->name; 130 130 clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60); 131 131 systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev); 132 + systick.dev.max_delta_ticks = 0x7fff; 132 133 systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev); 134 + systick.dev.min_delta_ticks = 0x3; 133 135 systick.dev.irq = irq_of_parse_and_map(np, 0); 134 136 if (!systick.dev.irq) { 135 137 pr_err("%s: request_irq failed", np->name);
+2
arch/mips/sgi-ip27/ip27-timer.c
··· 113 113 cd->features = CLOCK_EVT_FEAT_ONESHOT; 114 114 clockevent_set_clock(cd, CYCLES_PER_SEC); 115 115 cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd); 116 + cd->max_delta_ticks = 0xfffffffffffff; 116 117 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 118 + cd->min_delta_ticks = 0x300; 117 119 cd->rating = 200; 118 120 cd->irq = irq; 119 121 cd->cpumask = cpumask_of(cpu);
+2
arch/mn10300/kernel/cevt-mn10300.c
··· 98 98 99 99 /* Calculate the min / max delta */ 100 100 cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd); 101 + cd->max_delta_ticks = TMJCBR_MAX; 101 102 cd->min_delta_ns = clockevent_delta2ns(100, cd); 103 + cd->min_delta_ticks = 100; 102 104 103 105 cd->rating = 200; 104 106 cd->cpumask = cpumask_of(smp_processor_id());
+2
arch/powerpc/kernel/time.c
··· 995 995 996 996 decrementer_clockevent.max_delta_ns = 997 997 clockevent_delta2ns(decrementer_max, &decrementer_clockevent); 998 + decrementer_clockevent.max_delta_ticks = decrementer_max; 998 999 decrementer_clockevent.min_delta_ns = 999 1000 clockevent_delta2ns(2, &decrementer_clockevent); 1001 + decrementer_clockevent.min_delta_ticks = 2; 1000 1002 1001 1003 register_decrementer_clockevent(cpu); 1002 1004 }
+2
arch/s390/kernel/time.c
··· 158 158 cd->mult = 16777; 159 159 cd->shift = 12; 160 160 cd->min_delta_ns = 1; 161 + cd->min_delta_ticks = 1; 161 162 cd->max_delta_ns = LONG_MAX; 163 + cd->max_delta_ticks = ULONG_MAX; 162 164 cd->rating = 400; 163 165 cd->cpumask = cpumask_of(cpu); 164 166 cd->set_next_event = s390_next_event;
+2
arch/score/kernel/time.c
··· 81 81 score_clockevent.shift); 82 82 score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0, 83 83 &score_clockevent); 84 + score_clockevent.max_delta_ticks = (u32)~0; 84 85 score_clockevent.min_delta_ns = clockevent_delta2ns(50, 85 86 &score_clockevent) + 1; 87 + score_clockevent.min_delta_ticks = 50; 86 88 score_clockevent.cpumask = cpumask_of(0); 87 89 clockevents_register_device(&score_clockevent); 88 90 }
+2
arch/sparc/kernel/time_32.c
··· 228 228 ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC, 229 229 ce->shift); 230 230 ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce); 231 + ce->max_delta_ticks = (unsigned long)sparc_config.clock_rate; 231 232 ce->min_delta_ns = clockevent_delta2ns(100, ce); 233 + ce->min_delta_ticks = 100; 232 234 233 235 clockevents_register_device(ce); 234 236 }
+2
arch/sparc/kernel/time_64.c
··· 796 796 797 797 sparc64_clockevent.max_delta_ns = 798 798 clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); 799 + sparc64_clockevent.max_delta_ticks = 0x7fffffffffffffffUL; 799 800 sparc64_clockevent.min_delta_ns = 800 801 clockevent_delta2ns(0xF, &sparc64_clockevent); 802 + sparc64_clockevent.min_delta_ticks = 0xF; 801 803 802 804 printk("clockevent: mult[%x] shift[%d]\n", 803 805 sparc64_clockevent.mult, sparc64_clockevent.shift);
+2
arch/tile/kernel/time.c
··· 155 155 .name = "tile timer", 156 156 .features = CLOCK_EVT_FEAT_ONESHOT, 157 157 .min_delta_ns = 1000, 158 + .min_delta_ticks = 1, 159 + .max_delta_ticks = MAX_TICK, 158 160 .rating = 100, 159 161 .irq = -1, 160 162 .set_next_event = tile_timer_set_next_event,
+3 -1
arch/um/kernel/time.c
··· 65 65 .set_next_event = itimer_next_event, 66 66 .shift = 0, 67 67 .max_delta_ns = 0xffffffff, 68 - .min_delta_ns = TIMER_MIN_DELTA, //microsecond resolution should be enough for anyone, same as 640K RAM 68 + .max_delta_ticks = 0xffffffff, 69 + .min_delta_ns = TIMER_MIN_DELTA, 70 + .min_delta_ticks = TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone, same as 640K RAM 69 71 .irq = 0, 70 72 .mult = 1, 71 73 };
+2
arch/unicore32/kernel/time.c
··· 91 91 92 92 ckevt_puv3_osmr0.max_delta_ns = 93 93 clockevent_delta2ns(0x7fffffff, &ckevt_puv3_osmr0); 94 + ckevt_puv3_osmr0.max_delta_ticks = 0x7fffffff; 94 95 ckevt_puv3_osmr0.min_delta_ns = 95 96 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_puv3_osmr0) + 1; 97 + ckevt_puv3_osmr0.min_delta_ticks = MIN_OSCR_DELTA * 2; 96 98 ckevt_puv3_osmr0.cpumask = cpumask_of(0); 97 99 98 100 setup_irq(IRQ_TIMER0, &puv3_timer_irq);
+4
arch/x86/kernel/apic/apic.c
··· 731 731 TICK_NSEC, lapic_clockevent.shift); 732 732 lapic_clockevent.max_delta_ns = 733 733 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); 734 + lapic_clockevent.max_delta_ticks = 0x7FFFFF; 734 735 lapic_clockevent.min_delta_ns = 735 736 clockevent_delta2ns(0xF, &lapic_clockevent); 737 + lapic_clockevent.min_delta_ticks = 0xF; 736 738 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; 737 739 return 0; 738 740 } ··· 780 778 lapic_clockevent.shift); 781 779 lapic_clockevent.max_delta_ns = 782 780 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent); 781 + lapic_clockevent.max_delta_ticks = 0x7FFFFFFF; 783 782 lapic_clockevent.min_delta_ns = 784 783 clockevent_delta2ns(0xF, &lapic_clockevent); 784 + lapic_clockevent.min_delta_ticks = 0xF; 785 785 786 786 lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; 787 787
+2
arch/x86/lguest/boot.c
··· 994 994 .mult = 1, 995 995 .shift = 0, 996 996 .min_delta_ns = LG_CLOCK_MIN_DELTA, 997 + .min_delta_ticks = LG_CLOCK_MIN_DELTA, 997 998 .max_delta_ns = LG_CLOCK_MAX_DELTA, 999 + .max_delta_ticks = LG_CLOCK_MAX_DELTA, 998 1000 }; 999 1001 1000 1002 /*
+2
arch/x86/platform/uv/uv_time.c
··· 390 390 391 391 clock_event_device_uv.min_delta_ns = NSEC_PER_SEC / 392 392 sn_rtc_cycles_per_second; 393 + clock_event_device_uv.min_delta_ticks = 1; 393 394 394 395 clock_event_device_uv.max_delta_ns = clocksource_uv.mask * 395 396 (NSEC_PER_SEC / sn_rtc_cycles_per_second); 397 + clock_event_device_uv.max_delta_ticks = clocksource_uv.mask; 396 398 397 399 rc = schedule_on_each_cpu(uv_rtc_register_clockevents); 398 400 if (rc) {
+4
arch/x86/xen/time.c
··· 209 209 .features = CLOCK_EVT_FEAT_ONESHOT, 210 210 211 211 .max_delta_ns = 0xffffffff, 212 + .max_delta_ticks = 0xffffffff, 212 213 .min_delta_ns = TIMER_SLOP, 214 + .min_delta_ticks = TIMER_SLOP, 213 215 214 216 .mult = 1, 215 217 .shift = 0, ··· 270 268 .features = CLOCK_EVT_FEAT_ONESHOT, 271 269 272 270 .max_delta_ns = 0xffffffff, 271 + .max_delta_ticks = 0xffffffff, 273 272 .min_delta_ns = TIMER_SLOP, 273 + .min_delta_ticks = TIMER_SLOP, 274 274 275 275 .mult = 1, 276 276 .shift = 0,
+3
drivers/acpi/arm64/Kconfig
··· 4 4 5 5 config ACPI_IORT 6 6 bool 7 + 8 + config ACPI_GTDT 9 + bool
+1
drivers/acpi/arm64/Makefile
··· 1 1 obj-$(CONFIG_ACPI_IORT) += iort.o 2 + obj-$(CONFIG_ACPI_GTDT) += gtdt.o
+417
drivers/acpi/arm64/gtdt.c
··· 1 + /* 2 + * ARM Specific GTDT table Support 3 + * 4 + * Copyright (C) 2016, Linaro Ltd. 5 + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> 6 + * Fu Wei <fu.wei@linaro.org> 7 + * Hanjun Guo <hanjun.guo@linaro.org> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/acpi.h> 15 + #include <linux/init.h> 16 + #include <linux/irqdomain.h> 17 + #include <linux/kernel.h> 18 + #include <linux/platform_device.h> 19 + 20 + #include <clocksource/arm_arch_timer.h> 21 + 22 + #undef pr_fmt 23 + #define pr_fmt(fmt) "ACPI GTDT: " fmt 24 + 25 + /** 26 + * struct acpi_gtdt_descriptor - Store the key info of GTDT for all functions 27 + * @gtdt: The pointer to the struct acpi_table_gtdt of GTDT table. 28 + * @gtdt_end: The pointer to the end of GTDT table. 29 + * @platform_timer: The pointer to the start of Platform Timer Structure 30 + * 31 + * The struct store the key info of GTDT table, it should be initialized by 32 + * acpi_gtdt_init. 
33 + */ 34 + struct acpi_gtdt_descriptor { 35 + struct acpi_table_gtdt *gtdt; 36 + void *gtdt_end; 37 + void *platform_timer; 38 + }; 39 + 40 + static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata; 41 + 42 + static inline void *next_platform_timer(void *platform_timer) 43 + { 44 + struct acpi_gtdt_header *gh = platform_timer; 45 + 46 + platform_timer += gh->length; 47 + if (platform_timer < acpi_gtdt_desc.gtdt_end) 48 + return platform_timer; 49 + 50 + return NULL; 51 + } 52 + 53 + #define for_each_platform_timer(_g) \ 54 + for (_g = acpi_gtdt_desc.platform_timer; _g; \ 55 + _g = next_platform_timer(_g)) 56 + 57 + static inline bool is_timer_block(void *platform_timer) 58 + { 59 + struct acpi_gtdt_header *gh = platform_timer; 60 + 61 + return gh->type == ACPI_GTDT_TYPE_TIMER_BLOCK; 62 + } 63 + 64 + static inline bool is_non_secure_watchdog(void *platform_timer) 65 + { 66 + struct acpi_gtdt_header *gh = platform_timer; 67 + struct acpi_gtdt_watchdog *wd = platform_timer; 68 + 69 + if (gh->type != ACPI_GTDT_TYPE_WATCHDOG) 70 + return false; 71 + 72 + return !(wd->timer_flags & ACPI_GTDT_WATCHDOG_SECURE); 73 + } 74 + 75 + static int __init map_gt_gsi(u32 interrupt, u32 flags) 76 + { 77 + int trigger, polarity; 78 + 79 + trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE 80 + : ACPI_LEVEL_SENSITIVE; 81 + 82 + polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW 83 + : ACPI_ACTIVE_HIGH; 84 + 85 + return acpi_register_gsi(NULL, interrupt, trigger, polarity); 86 + } 87 + 88 + /** 89 + * acpi_gtdt_map_ppi() - Map the PPIs of per-cpu arch_timer. 90 + * @type: the type of PPI. 91 + * 92 + * Note: Secure state is not managed by the kernel on ARM64 systems. 93 + * So we only handle the non-secure timer PPIs, 94 + * ARCH_TIMER_PHYS_SECURE_PPI is treated as invalid type. 95 + * 96 + * Return: the mapped PPI value, 0 if error. 
97 + */ 98 + int __init acpi_gtdt_map_ppi(int type) 99 + { 100 + struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt; 101 + 102 + switch (type) { 103 + case ARCH_TIMER_PHYS_NONSECURE_PPI: 104 + return map_gt_gsi(gtdt->non_secure_el1_interrupt, 105 + gtdt->non_secure_el1_flags); 106 + case ARCH_TIMER_VIRT_PPI: 107 + return map_gt_gsi(gtdt->virtual_timer_interrupt, 108 + gtdt->virtual_timer_flags); 109 + 110 + case ARCH_TIMER_HYP_PPI: 111 + return map_gt_gsi(gtdt->non_secure_el2_interrupt, 112 + gtdt->non_secure_el2_flags); 113 + default: 114 + pr_err("Failed to map timer interrupt: invalid type.\n"); 115 + } 116 + 117 + return 0; 118 + } 119 + 120 + /** 121 + * acpi_gtdt_c3stop() - Got c3stop info from GTDT according to the type of PPI. 122 + * @type: the type of PPI. 123 + * 124 + * Return: true if the timer HW state is lost when a CPU enters an idle state, 125 + * false otherwise 126 + */ 127 + bool __init acpi_gtdt_c3stop(int type) 128 + { 129 + struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt; 130 + 131 + switch (type) { 132 + case ARCH_TIMER_PHYS_NONSECURE_PPI: 133 + return !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON); 134 + 135 + case ARCH_TIMER_VIRT_PPI: 136 + return !(gtdt->virtual_timer_flags & ACPI_GTDT_ALWAYS_ON); 137 + 138 + case ARCH_TIMER_HYP_PPI: 139 + return !(gtdt->non_secure_el2_flags & ACPI_GTDT_ALWAYS_ON); 140 + 141 + default: 142 + pr_err("Failed to get c3stop info: invalid type.\n"); 143 + } 144 + 145 + return false; 146 + } 147 + 148 + /** 149 + * acpi_gtdt_init() - Get the info of GTDT table to prepare for further init. 150 + * @table: The pointer to GTDT table. 151 + * @platform_timer_count: It points to a integer variable which is used 152 + * for storing the number of platform timers. 153 + * This pointer could be NULL, if the caller 154 + * doesn't need this info. 155 + * 156 + * Return: 0 if success, -EINVAL if error. 
157 + */ 158 + int __init acpi_gtdt_init(struct acpi_table_header *table, 159 + int *platform_timer_count) 160 + { 161 + void *platform_timer; 162 + struct acpi_table_gtdt *gtdt; 163 + 164 + gtdt = container_of(table, struct acpi_table_gtdt, header); 165 + acpi_gtdt_desc.gtdt = gtdt; 166 + acpi_gtdt_desc.gtdt_end = (void *)table + table->length; 167 + acpi_gtdt_desc.platform_timer = NULL; 168 + if (platform_timer_count) 169 + *platform_timer_count = 0; 170 + 171 + if (table->revision < 2) { 172 + pr_warn("Revision:%d doesn't support Platform Timers.\n", 173 + table->revision); 174 + return 0; 175 + } 176 + 177 + if (!gtdt->platform_timer_count) { 178 + pr_debug("No Platform Timer.\n"); 179 + return 0; 180 + } 181 + 182 + platform_timer = (void *)gtdt + gtdt->platform_timer_offset; 183 + if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) { 184 + pr_err(FW_BUG "invalid timer data.\n"); 185 + return -EINVAL; 186 + } 187 + acpi_gtdt_desc.platform_timer = platform_timer; 188 + if (platform_timer_count) 189 + *platform_timer_count = gtdt->platform_timer_count; 190 + 191 + return 0; 192 + } 193 + 194 + static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block, 195 + struct arch_timer_mem *timer_mem) 196 + { 197 + int i; 198 + struct arch_timer_mem_frame *frame; 199 + struct acpi_gtdt_timer_entry *gtdt_frame; 200 + 201 + if (!block->timer_count) { 202 + pr_err(FW_BUG "GT block present, but frame count is zero."); 203 + return -ENODEV; 204 + } 205 + 206 + if (block->timer_count > ARCH_TIMER_MEM_MAX_FRAMES) { 207 + pr_err(FW_BUG "GT block lists %d frames, ACPI spec only allows 8\n", 208 + block->timer_count); 209 + return -EINVAL; 210 + } 211 + 212 + timer_mem->cntctlbase = (phys_addr_t)block->block_address; 213 + /* 214 + * The CNTCTLBase frame is 4KB (register offsets 0x000 - 0xFFC). 215 + * See ARM DDI 0487A.k_iss10775, page I1-5129, Table I1-3 216 + * "CNTCTLBase memory map". 
217 + */ 218 + timer_mem->size = SZ_4K; 219 + 220 + gtdt_frame = (void *)block + block->timer_offset; 221 + if (gtdt_frame + block->timer_count != (void *)block + block->header.length) 222 + return -EINVAL; 223 + 224 + /* 225 + * Get the GT timer Frame data for every GT Block Timer 226 + */ 227 + for (i = 0; i < block->timer_count; i++, gtdt_frame++) { 228 + if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER) 229 + continue; 230 + if (gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES || 231 + !gtdt_frame->base_address || !gtdt_frame->timer_interrupt) 232 + goto error; 233 + 234 + frame = &timer_mem->frame[gtdt_frame->frame_number]; 235 + 236 + /* duplicate frame */ 237 + if (frame->valid) 238 + goto error; 239 + 240 + frame->phys_irq = map_gt_gsi(gtdt_frame->timer_interrupt, 241 + gtdt_frame->timer_flags); 242 + if (frame->phys_irq <= 0) { 243 + pr_warn("failed to map physical timer irq in frame %d.\n", 244 + gtdt_frame->frame_number); 245 + goto error; 246 + } 247 + 248 + if (gtdt_frame->virtual_timer_interrupt) { 249 + frame->virt_irq = 250 + map_gt_gsi(gtdt_frame->virtual_timer_interrupt, 251 + gtdt_frame->virtual_timer_flags); 252 + if (frame->virt_irq <= 0) { 253 + pr_warn("failed to map virtual timer irq in frame %d.\n", 254 + gtdt_frame->frame_number); 255 + goto error; 256 + } 257 + } else { 258 + pr_debug("virtual timer in frame %d not implemented.\n", 259 + gtdt_frame->frame_number); 260 + } 261 + 262 + frame->cntbase = gtdt_frame->base_address; 263 + /* 264 + * The CNTBaseN frame is 4KB (register offsets 0x000 - 0xFFC). 265 + * See ARM DDI 0487A.k_iss10775, page I1-5130, Table I1-4 266 + * "CNTBaseN memory map". 
267 + */ 268 + frame->size = SZ_4K; 269 + frame->valid = true; 270 + } 271 + 272 + return 0; 273 + 274 + error: 275 + do { 276 + if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER || 277 + gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES) 278 + continue; 279 + 280 + frame = &timer_mem->frame[gtdt_frame->frame_number]; 281 + 282 + if (frame->phys_irq > 0) 283 + acpi_unregister_gsi(gtdt_frame->timer_interrupt); 284 + frame->phys_irq = 0; 285 + 286 + if (frame->virt_irq > 0) 287 + acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt); 288 + frame->virt_irq = 0; 289 + } while (i-- >= 0 && gtdt_frame--); 290 + 291 + return -EINVAL; 292 + } 293 + 294 + /** 295 + * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table. 296 + * @timer_mem: The pointer to the array of struct arch_timer_mem for returning 297 + * the result of parsing. The element number of this array should 298 + * be platform_timer_count(the total number of platform timers). 299 + * @timer_count: It points to a integer variable which is used for storing the 300 + * number of GT blocks we have parsed. 301 + * 302 + * Return: 0 if success, -EINVAL/-ENODEV if error. 
303 + */ 304 + int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, 305 + int *timer_count) 306 + { 307 + int ret; 308 + void *platform_timer; 309 + 310 + *timer_count = 0; 311 + for_each_platform_timer(platform_timer) { 312 + if (is_timer_block(platform_timer)) { 313 + ret = gtdt_parse_timer_block(platform_timer, timer_mem); 314 + if (ret) 315 + return ret; 316 + timer_mem++; 317 + (*timer_count)++; 318 + } 319 + } 320 + 321 + if (*timer_count) 322 + pr_info("found %d memory-mapped timer block(s).\n", 323 + *timer_count); 324 + 325 + return 0; 326 + } 327 + 328 + /* 329 + * Initialize a SBSA generic Watchdog platform device info from GTDT 330 + */ 331 + static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, 332 + int index) 333 + { 334 + struct platform_device *pdev; 335 + int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); 336 + 337 + /* 338 + * According to SBSA specification the size of refresh and control 339 + * frames of SBSA Generic Watchdog is SZ_4K(Offset 0x000 – 0xFFF). 340 + */ 341 + struct resource res[] = { 342 + DEFINE_RES_MEM(wd->control_frame_address, SZ_4K), 343 + DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K), 344 + DEFINE_RES_IRQ(irq), 345 + }; 346 + int nr_res = ARRAY_SIZE(res); 347 + 348 + pr_debug("found a Watchdog (0x%llx/0x%llx gsi:%u flags:0x%x).\n", 349 + wd->refresh_frame_address, wd->control_frame_address, 350 + wd->timer_interrupt, wd->timer_flags); 351 + 352 + if (!(wd->refresh_frame_address && wd->control_frame_address)) { 353 + pr_err(FW_BUG "failed to get the Watchdog base address.\n"); 354 + acpi_unregister_gsi(wd->timer_interrupt); 355 + return -EINVAL; 356 + } 357 + 358 + if (irq <= 0) { 359 + pr_warn("failed to map the Watchdog interrupt.\n"); 360 + nr_res--; 361 + } 362 + 363 + /* 364 + * Add a platform device named "sbsa-gwdt" to match the platform driver. 
365 + * "sbsa-gwdt": SBSA(Server Base System Architecture) Generic Watchdog 366 + * The platform driver can get device info below by matching this name. 367 + */ 368 + pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res); 369 + if (IS_ERR(pdev)) { 370 + acpi_unregister_gsi(wd->timer_interrupt); 371 + return PTR_ERR(pdev); 372 + } 373 + 374 + return 0; 375 + } 376 + 377 + static int __init gtdt_sbsa_gwdt_init(void) 378 + { 379 + void *platform_timer; 380 + struct acpi_table_header *table; 381 + int ret, timer_count, gwdt_count = 0; 382 + 383 + if (acpi_disabled) 384 + return 0; 385 + 386 + if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_GTDT, 0, &table))) 387 + return -EINVAL; 388 + 389 + /* 390 + * Note: Even though the global variable acpi_gtdt_desc has been 391 + * initialized by acpi_gtdt_init() while initializing the arch timers, 392 + * when we call this function to get SBSA watchdogs info from GTDT, the 393 + * pointers stashed in it are stale (since they are early temporary 394 + * mappings carried out before acpi_permanent_mmap is set) and we need 395 + * to re-initialize them with permanent mapped pointer values to let the 396 + * GTDT parsing possible. 397 + */ 398 + ret = acpi_gtdt_init(table, &timer_count); 399 + if (ret || !timer_count) 400 + return ret; 401 + 402 + for_each_platform_timer(platform_timer) { 403 + if (is_non_secure_watchdog(platform_timer)) { 404 + ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count); 405 + if (ret) 406 + break; 407 + gwdt_count++; 408 + } 409 + } 410 + 411 + if (gwdt_count) 412 + pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count); 413 + 414 + return ret; 415 + } 416 + 417 + device_initcall(gtdt_sbsa_gwdt_init);
+14 -14
drivers/char/mmtimer.c
··· 478 478 static struct timespec sgi_clock_offset; 479 479 static int sgi_clock_period; 480 480 481 - static int sgi_clock_get(clockid_t clockid, struct timespec *tp) 481 + static int sgi_clock_get(clockid_t clockid, struct timespec64 *tp) 482 482 { 483 483 u64 nsec; 484 484 485 485 nsec = rtc_time() * sgi_clock_period 486 486 + sgi_clock_offset.tv_nsec; 487 - *tp = ns_to_timespec(nsec); 487 + *tp = ns_to_timespec64(nsec); 488 488 tp->tv_sec += sgi_clock_offset.tv_sec; 489 489 return 0; 490 490 }; 491 491 492 - static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp) 492 + static int sgi_clock_set(const clockid_t clockid, const struct timespec64 *tp) 493 493 { 494 494 495 495 u64 nsec; ··· 657 657 } 658 658 659 659 /* Assumption: it_lock is already held with irq's disabled */ 660 - static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) 660 + static void sgi_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) 661 661 { 662 662 663 663 if (timr->it.mmtimer.clock == TIMER_OFF) { ··· 668 668 return; 669 669 } 670 670 671 - cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period); 672 - cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); 671 + cur_setting->it_interval = ns_to_timespec64(timr->it.mmtimer.incr * sgi_clock_period); 672 + cur_setting->it_value = ns_to_timespec64((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); 673 673 } 674 674 675 675 676 676 static int sgi_timer_set(struct k_itimer *timr, int flags, 677 - struct itimerspec * new_setting, 678 - struct itimerspec * old_setting) 677 + struct itimerspec64 *new_setting, 678 + struct itimerspec64 *old_setting) 679 679 { 680 680 unsigned long when, period, irqflags; 681 681 int err = 0; ··· 687 687 sgi_timer_get(timr, old_setting); 688 688 689 689 sgi_timer_del(timr); 690 - when = timespec_to_ns(&new_setting->it_value); 691 - period = 
timespec_to_ns(&new_setting->it_interval); 690 + when = timespec64_to_ns(&new_setting->it_value); 691 + period = timespec64_to_ns(&new_setting->it_interval); 692 692 693 693 if (when == 0) 694 694 /* Clear timer */ ··· 699 699 return -ENOMEM; 700 700 701 701 if (flags & TIMER_ABSTIME) { 702 - struct timespec n; 702 + struct timespec64 n; 703 703 unsigned long now; 704 704 705 - getnstimeofday(&n); 706 - now = timespec_to_ns(&n); 705 + getnstimeofday64(&n); 706 + now = timespec64_to_ns(&n); 707 707 if (when > now) 708 708 when -= now; 709 709 else ··· 765 765 return err; 766 766 } 767 767 768 - static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp) 768 + static int sgi_clock_getres(const clockid_t which_clock, struct timespec64 *tp) 769 769 { 770 770 tp->tv_sec = 0; 771 771 tp->tv_nsec = sgi_clock_period;
+16 -3
drivers/clocksource/Kconfig
··· 67 67 select DW_APB_TIMER 68 68 select CLKSRC_OF 69 69 70 - config GEMINI_TIMER 71 - bool "Cortina Gemini timer driver" if COMPILE_TEST 70 + config FTTMR010_TIMER 71 + bool "Faraday Technology timer driver" if COMPILE_TEST 72 72 depends on GENERIC_CLOCKEVENTS 73 73 depends on HAS_IOMEM 74 74 select CLKSRC_MMIO 75 75 select CLKSRC_OF 76 76 select MFD_SYSCON 77 77 help 78 - Enables support for the Gemini timer 78 + Enables support for the Faraday Technology timer block 79 + FTTMR010. 79 80 80 81 config ROCKCHIP_TIMER 81 82 bool "Rockchip timer driver" if COMPILE_TEST 82 83 depends on ARM || ARM64 83 84 select CLKSRC_OF 85 + select CLKSRC_MMIO 84 86 help 85 87 Enables the support for the rockchip timer driver. 86 88 ··· 367 365 This option enables a workaround for Hisilicon Erratum 368 366 161010101. The workaround will be active if the hisilicon,erratum-161010101 369 367 property is found in the timer node. 368 + 369 + config ARM64_ERRATUM_858921 370 + bool "Workaround for Cortex-A73 erratum 858921" 371 + default y 372 + select ARM_ARCH_TIMER_OOL_WORKAROUND 373 + depends on ARM_ARCH_TIMER && ARM64 374 + help 375 + This option enables a workaround applicable to Cortex-A73 376 + (all versions), whose counter may return incorrect values. 377 + The workaround will be dynamically enabled when an affected 378 + core is detected. 370 379 371 380 config ARM_GLOBAL_TIMER 372 381 bool "Support for the ARM global timer" if COMPILE_TEST
+1 -1
drivers/clocksource/Makefile
··· 17 17 obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o 18 18 obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o 19 19 obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o 20 - obj-$(CONFIG_GEMINI_TIMER) += timer-gemini.o 20 + obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o 21 21 obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o 22 22 obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o 23 23 obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
+7 -7
drivers/clocksource/arc_timer.c
··· 37 37 38 38 clk = of_clk_get(node, 0); 39 39 if (IS_ERR(clk)) { 40 - pr_err("timer missing clk"); 40 + pr_err("timer missing clk\n"); 41 41 return PTR_ERR(clk); 42 42 } 43 43 ··· 89 89 90 90 READ_BCR(ARC_REG_MCIP_BCR, mp); 91 91 if (!mp.gfrc) { 92 - pr_warn("Global-64-bit-Ctr clocksource not detected"); 92 + pr_warn("Global-64-bit-Ctr clocksource not detected\n"); 93 93 return -ENXIO; 94 94 } 95 95 ··· 140 140 141 141 READ_BCR(ARC_REG_TIMERS_BCR, timer); 142 142 if (!timer.rtc) { 143 - pr_warn("Local-64-bit-Ctr clocksource not detected"); 143 + pr_warn("Local-64-bit-Ctr clocksource not detected\n"); 144 144 return -ENXIO; 145 145 } 146 146 147 147 /* Local to CPU hence not usable in SMP */ 148 148 if (IS_ENABLED(CONFIG_SMP)) { 149 - pr_warn("Local-64-bit-Ctr not usable in SMP"); 149 + pr_warn("Local-64-bit-Ctr not usable in SMP\n"); 150 150 return -EINVAL; 151 151 } 152 152 ··· 290 290 291 291 arc_timer_irq = irq_of_parse_and_map(node, 0); 292 292 if (arc_timer_irq <= 0) { 293 - pr_err("clockevent: missing irq"); 293 + pr_err("clockevent: missing irq\n"); 294 294 return -EINVAL; 295 295 } 296 296 297 297 ret = arc_get_timer_clk(node); 298 298 if (ret) { 299 - pr_err("clockevent: missing clk"); 299 + pr_err("clockevent: missing clk\n"); 300 300 return ret; 301 301 } 302 302 ··· 313 313 arc_timer_starting_cpu, 314 314 arc_timer_dying_cpu); 315 315 if (ret) { 316 - pr_err("Failed to setup hotplug state"); 316 + pr_err("Failed to setup hotplug state\n"); 317 317 return ret; 318 318 } 319 319 return 0;
+778 -392
drivers/clocksource/arm_arch_timer.c
··· 33 33 34 34 #include <clocksource/arm_arch_timer.h> 35 35 36 + #undef pr_fmt 37 + #define pr_fmt(fmt) "arch_timer: " fmt 38 + 36 39 #define CNTTIDR 0x08 37 40 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) 38 41 ··· 55 52 #define CNTV_TVAL 0x38 56 53 #define CNTV_CTL 0x3c 57 54 58 - #define ARCH_CP15_TIMER BIT(0) 59 - #define ARCH_MEM_TIMER BIT(1) 60 55 static unsigned arch_timers_present __initdata; 61 56 62 57 static void __iomem *arch_counter_base; ··· 67 66 #define to_arch_timer(e) container_of(e, struct arch_timer, evt) 68 67 69 68 static u32 arch_timer_rate; 70 - 71 - enum ppi_nr { 72 - PHYS_SECURE_PPI, 73 - PHYS_NONSECURE_PPI, 74 - VIRT_PPI, 75 - HYP_PPI, 76 - MAX_TIMER_PPI 77 - }; 78 - 79 - static int arch_timer_ppi[MAX_TIMER_PPI]; 69 + static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI]; 80 70 81 71 static struct clock_event_device __percpu *arch_timer_evt; 82 72 83 - static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI; 73 + static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI; 84 74 static bool arch_timer_c3stop; 85 75 static bool arch_timer_mem_use_virtual; 86 76 static bool arch_counter_suspend_stop; 77 + static bool vdso_default = true; 87 78 88 79 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); 89 80 ··· 88 95 /* 89 96 * Architected system timer support. 
90 97 */ 98 + 99 + static __always_inline 100 + void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 101 + struct clock_event_device *clk) 102 + { 103 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 104 + struct arch_timer *timer = to_arch_timer(clk); 105 + switch (reg) { 106 + case ARCH_TIMER_REG_CTRL: 107 + writel_relaxed(val, timer->base + CNTP_CTL); 108 + break; 109 + case ARCH_TIMER_REG_TVAL: 110 + writel_relaxed(val, timer->base + CNTP_TVAL); 111 + break; 112 + } 113 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 114 + struct arch_timer *timer = to_arch_timer(clk); 115 + switch (reg) { 116 + case ARCH_TIMER_REG_CTRL: 117 + writel_relaxed(val, timer->base + CNTV_CTL); 118 + break; 119 + case ARCH_TIMER_REG_TVAL: 120 + writel_relaxed(val, timer->base + CNTV_TVAL); 121 + break; 122 + } 123 + } else { 124 + arch_timer_reg_write_cp15(access, reg, val); 125 + } 126 + } 127 + 128 + static __always_inline 129 + u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 130 + struct clock_event_device *clk) 131 + { 132 + u32 val; 133 + 134 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 135 + struct arch_timer *timer = to_arch_timer(clk); 136 + switch (reg) { 137 + case ARCH_TIMER_REG_CTRL: 138 + val = readl_relaxed(timer->base + CNTP_CTL); 139 + break; 140 + case ARCH_TIMER_REG_TVAL: 141 + val = readl_relaxed(timer->base + CNTP_TVAL); 142 + break; 143 + } 144 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 145 + struct arch_timer *timer = to_arch_timer(clk); 146 + switch (reg) { 147 + case ARCH_TIMER_REG_CTRL: 148 + val = readl_relaxed(timer->base + CNTV_CTL); 149 + break; 150 + case ARCH_TIMER_REG_TVAL: 151 + val = readl_relaxed(timer->base + CNTV_TVAL); 152 + break; 153 + } 154 + } else { 155 + val = arch_timer_reg_read_cp15(access, reg); 156 + } 157 + 158 + return val; 159 + } 160 + 161 + /* 162 + * Default to cp15 based access because arm64 uses this function for 163 + * sched_clock() before DT is probed and the cp15 method is 
guaranteed 164 + * to exist on arm64. arm doesn't use this before DT is probed so even 165 + * if we don't have the cp15 accessors we won't have a problem. 166 + */ 167 + u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 168 + 169 + static u64 arch_counter_read(struct clocksource *cs) 170 + { 171 + return arch_timer_read_counter(); 172 + } 173 + 174 + static u64 arch_counter_read_cc(const struct cyclecounter *cc) 175 + { 176 + return arch_timer_read_counter(); 177 + } 178 + 179 + static struct clocksource clocksource_counter = { 180 + .name = "arch_sys_counter", 181 + .rating = 400, 182 + .read = arch_counter_read, 183 + .mask = CLOCKSOURCE_MASK(56), 184 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 185 + }; 186 + 187 + static struct cyclecounter cyclecounter __ro_after_init = { 188 + .read = arch_counter_read_cc, 189 + .mask = CLOCKSOURCE_MASK(56), 190 + }; 191 + 192 + struct ate_acpi_oem_info { 193 + char oem_id[ACPI_OEM_ID_SIZE + 1]; 194 + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 195 + u32 oem_revision; 196 + }; 91 197 92 198 #ifdef CONFIG_FSL_ERRATUM_A008585 93 199 /* ··· 262 170 { 263 171 return __hisi_161010101_read_reg(cntvct_el0); 264 172 } 173 + 174 + static struct ate_acpi_oem_info hisi_161010101_oem_info[] = { 175 + /* 176 + * Note that trailing spaces are required to properly match 177 + * the OEM table information. 
178 + */ 179 + { 180 + .oem_id = "HISI ", 181 + .oem_table_id = "HIP05 ", 182 + .oem_revision = 0, 183 + }, 184 + { 185 + .oem_id = "HISI ", 186 + .oem_table_id = "HIP06 ", 187 + .oem_revision = 0, 188 + }, 189 + { 190 + .oem_id = "HISI ", 191 + .oem_table_id = "HIP07 ", 192 + .oem_revision = 0, 193 + }, 194 + { /* Sentinel indicating the end of the OEM array */ }, 195 + }; 196 + #endif 197 + 198 + #ifdef CONFIG_ARM64_ERRATUM_858921 199 + static u64 notrace arm64_858921_read_cntvct_el0(void) 200 + { 201 + u64 old, new; 202 + 203 + old = read_sysreg(cntvct_el0); 204 + new = read_sysreg(cntvct_el0); 205 + return (((old ^ new) >> 32) & 1) ? old : new; 206 + } 265 207 #endif 266 208 267 209 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 268 - const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL; 210 + DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, 211 + timer_unstable_counter_workaround); 269 212 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); 270 213 271 214 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); 272 215 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled); 273 216 217 + static void erratum_set_next_event_tval_generic(const int access, unsigned long evt, 218 + struct clock_event_device *clk) 219 + { 220 + unsigned long ctrl; 221 + u64 cval = evt + arch_counter_get_cntvct(); 222 + 223 + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 224 + ctrl |= ARCH_TIMER_CTRL_ENABLE; 225 + ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 226 + 227 + if (access == ARCH_TIMER_PHYS_ACCESS) 228 + write_sysreg(cval, cntp_cval_el0); 229 + else 230 + write_sysreg(cval, cntv_cval_el0); 231 + 232 + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 233 + } 234 + 235 + static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt, 236 + struct clock_event_device *clk) 237 + { 238 + erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk); 239 + return 0; 240 + } 241 + 242 + static __maybe_unused 
int erratum_set_next_event_tval_phys(unsigned long evt, 243 + struct clock_event_device *clk) 244 + { 245 + erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk); 246 + return 0; 247 + } 248 + 274 249 static const struct arch_timer_erratum_workaround ool_workarounds[] = { 275 250 #ifdef CONFIG_FSL_ERRATUM_A008585 276 251 { 252 + .match_type = ate_match_dt, 277 253 .id = "fsl,erratum-a008585", 254 + .desc = "Freescale erratum a005858", 278 255 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, 279 256 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, 280 257 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, 258 + .set_next_event_phys = erratum_set_next_event_tval_phys, 259 + .set_next_event_virt = erratum_set_next_event_tval_virt, 281 260 }, 282 261 #endif 283 262 #ifdef CONFIG_HISILICON_ERRATUM_161010101 284 263 { 264 + .match_type = ate_match_dt, 285 265 .id = "hisilicon,erratum-161010101", 266 + .desc = "HiSilicon erratum 161010101", 286 267 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, 287 268 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, 288 269 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, 270 + .set_next_event_phys = erratum_set_next_event_tval_phys, 271 + .set_next_event_virt = erratum_set_next_event_tval_virt, 272 + }, 273 + { 274 + .match_type = ate_match_acpi_oem_info, 275 + .id = hisi_161010101_oem_info, 276 + .desc = "HiSilicon erratum 161010101", 277 + .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, 278 + .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, 279 + .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, 280 + .set_next_event_phys = erratum_set_next_event_tval_phys, 281 + .set_next_event_virt = erratum_set_next_event_tval_virt, 282 + }, 283 + #endif 284 + #ifdef CONFIG_ARM64_ERRATUM_858921 285 + { 286 + .match_type = ate_match_local_cap_id, 287 + .id = (void *)ARM64_WORKAROUND_858921, 288 + .desc = "ARM erratum 858921", 289 + .read_cntvct_el0 = arm64_858921_read_cntvct_el0, 289 290 
}, 290 291 #endif 291 292 }; 293 + 294 + typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, 295 + const void *); 296 + 297 + static 298 + bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa, 299 + const void *arg) 300 + { 301 + const struct device_node *np = arg; 302 + 303 + return of_property_read_bool(np, wa->id); 304 + } 305 + 306 + static 307 + bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa, 308 + const void *arg) 309 + { 310 + return this_cpu_has_cap((uintptr_t)wa->id); 311 + } 312 + 313 + 314 + static 315 + bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa, 316 + const void *arg) 317 + { 318 + static const struct ate_acpi_oem_info empty_oem_info = {}; 319 + const struct ate_acpi_oem_info *info = wa->id; 320 + const struct acpi_table_header *table = arg; 321 + 322 + /* Iterate over the ACPI OEM info array, looking for a match */ 323 + while (memcmp(info, &empty_oem_info, sizeof(*info))) { 324 + if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && 325 + !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 326 + info->oem_revision == table->oem_revision) 327 + return true; 328 + 329 + info++; 330 + } 331 + 332 + return false; 333 + } 334 + 335 + static const struct arch_timer_erratum_workaround * 336 + arch_timer_iterate_errata(enum arch_timer_erratum_match_type type, 337 + ate_match_fn_t match_fn, 338 + void *arg) 339 + { 340 + int i; 341 + 342 + for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) { 343 + if (ool_workarounds[i].match_type != type) 344 + continue; 345 + 346 + if (match_fn(&ool_workarounds[i], arg)) 347 + return &ool_workarounds[i]; 348 + } 349 + 350 + return NULL; 351 + } 352 + 353 + static 354 + void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa, 355 + bool local) 356 + { 357 + int i; 358 + 359 + if (local) { 360 + 
__this_cpu_write(timer_unstable_counter_workaround, wa); 361 + } else { 362 + for_each_possible_cpu(i) 363 + per_cpu(timer_unstable_counter_workaround, i) = wa; 364 + } 365 + 366 + static_branch_enable(&arch_timer_read_ool_enabled); 367 + 368 + /* 369 + * Don't use the vdso fastpath if errata require using the 370 + * out-of-line counter accessor. We may change our mind pretty 371 + * late in the game (with a per-CPU erratum, for example), so 372 + * change both the default value and the vdso itself. 373 + */ 374 + if (wa->read_cntvct_el0) { 375 + clocksource_counter.archdata.vdso_direct = false; 376 + vdso_default = false; 377 + } 378 + } 379 + 380 + static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type, 381 + void *arg) 382 + { 383 + const struct arch_timer_erratum_workaround *wa; 384 + ate_match_fn_t match_fn = NULL; 385 + bool local = false; 386 + 387 + switch (type) { 388 + case ate_match_dt: 389 + match_fn = arch_timer_check_dt_erratum; 390 + break; 391 + case ate_match_local_cap_id: 392 + match_fn = arch_timer_check_local_cap_erratum; 393 + local = true; 394 + break; 395 + case ate_match_acpi_oem_info: 396 + match_fn = arch_timer_check_acpi_oem_erratum; 397 + break; 398 + default: 399 + WARN_ON(1); 400 + return; 401 + } 402 + 403 + wa = arch_timer_iterate_errata(type, match_fn, arg); 404 + if (!wa) 405 + return; 406 + 407 + if (needs_unstable_timer_counter_workaround()) { 408 + const struct arch_timer_erratum_workaround *__wa; 409 + __wa = __this_cpu_read(timer_unstable_counter_workaround); 410 + if (__wa && wa != __wa) 411 + pr_warn("Can't enable workaround for %s (clashes with %s\n)", 412 + wa->desc, __wa->desc); 413 + 414 + if (__wa) 415 + return; 416 + } 417 + 418 + arch_timer_enable_workaround(wa, local); 419 + pr_info("Enabling %s workaround for %s\n", 420 + local ? "local" : "global", wa->desc); 421 + } 422 + 423 + #define erratum_handler(fn, r, ...) 
\ 424 + ({ \ 425 + bool __val; \ 426 + if (needs_unstable_timer_counter_workaround()) { \ 427 + const struct arch_timer_erratum_workaround *__wa; \ 428 + __wa = __this_cpu_read(timer_unstable_counter_workaround); \ 429 + if (__wa && __wa->fn) { \ 430 + r = __wa->fn(__VA_ARGS__); \ 431 + __val = true; \ 432 + } else { \ 433 + __val = false; \ 434 + } \ 435 + } else { \ 436 + __val = false; \ 437 + } \ 438 + __val; \ 439 + }) 440 + 441 + static bool arch_timer_this_cpu_has_cntvct_wa(void) 442 + { 443 + const struct arch_timer_erratum_workaround *wa; 444 + 445 + wa = __this_cpu_read(timer_unstable_counter_workaround); 446 + return wa && wa->read_cntvct_el0; 447 + } 448 + #else 449 + #define arch_timer_check_ool_workaround(t,a) do { } while(0) 450 + #define erratum_set_next_event_tval_virt(...) ({BUG(); 0;}) 451 + #define erratum_set_next_event_tval_phys(...) ({BUG(); 0;}) 452 + #define erratum_handler(fn, r, ...) ({false;}) 453 + #define arch_timer_this_cpu_has_cntvct_wa() ({false;}) 292 454 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ 293 - 294 - static __always_inline 295 - void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 296 - struct clock_event_device *clk) 297 - { 298 - if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 299 - struct arch_timer *timer = to_arch_timer(clk); 300 - switch (reg) { 301 - case ARCH_TIMER_REG_CTRL: 302 - writel_relaxed(val, timer->base + CNTP_CTL); 303 - break; 304 - case ARCH_TIMER_REG_TVAL: 305 - writel_relaxed(val, timer->base + CNTP_TVAL); 306 - break; 307 - } 308 - } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 309 - struct arch_timer *timer = to_arch_timer(clk); 310 - switch (reg) { 311 - case ARCH_TIMER_REG_CTRL: 312 - writel_relaxed(val, timer->base + CNTV_CTL); 313 - break; 314 - case ARCH_TIMER_REG_TVAL: 315 - writel_relaxed(val, timer->base + CNTV_TVAL); 316 - break; 317 - } 318 - } else { 319 - arch_timer_reg_write_cp15(access, reg, val); 320 - } 321 - } 322 - 323 - static __always_inline 324 - u32 
arch_timer_reg_read(int access, enum arch_timer_reg reg, 325 - struct clock_event_device *clk) 326 - { 327 - u32 val; 328 - 329 - if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 330 - struct arch_timer *timer = to_arch_timer(clk); 331 - switch (reg) { 332 - case ARCH_TIMER_REG_CTRL: 333 - val = readl_relaxed(timer->base + CNTP_CTL); 334 - break; 335 - case ARCH_TIMER_REG_TVAL: 336 - val = readl_relaxed(timer->base + CNTP_TVAL); 337 - break; 338 - } 339 - } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 340 - struct arch_timer *timer = to_arch_timer(clk); 341 - switch (reg) { 342 - case ARCH_TIMER_REG_CTRL: 343 - val = readl_relaxed(timer->base + CNTV_CTL); 344 - break; 345 - case ARCH_TIMER_REG_TVAL: 346 - val = readl_relaxed(timer->base + CNTV_TVAL); 347 - break; 348 - } 349 - } else { 350 - val = arch_timer_reg_read_cp15(access, reg); 351 - } 352 - 353 - return val; 354 - } 355 455 356 456 static __always_inline irqreturn_t timer_handler(const int access, 357 457 struct clock_event_device *evt) ··· 632 348 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 633 349 } 634 350 635 - #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 636 - static __always_inline void erratum_set_next_event_generic(const int access, 637 - unsigned long evt, struct clock_event_device *clk) 638 - { 639 - unsigned long ctrl; 640 - u64 cval = evt + arch_counter_get_cntvct(); 641 - 642 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 643 - ctrl |= ARCH_TIMER_CTRL_ENABLE; 644 - ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 645 - 646 - if (access == ARCH_TIMER_PHYS_ACCESS) 647 - write_sysreg(cval, cntp_cval_el0); 648 - else if (access == ARCH_TIMER_VIRT_ACCESS) 649 - write_sysreg(cval, cntv_cval_el0); 650 - 651 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 652 - } 653 - 654 - static int erratum_set_next_event_virt(unsigned long evt, 655 - struct clock_event_device *clk) 656 - { 657 - erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk); 658 - return 0; 659 
- } 660 - 661 - static int erratum_set_next_event_phys(unsigned long evt, 662 - struct clock_event_device *clk) 663 - { 664 - erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk); 665 - return 0; 666 - } 667 - #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ 668 - 669 351 static int arch_timer_set_next_event_virt(unsigned long evt, 670 352 struct clock_event_device *clk) 671 353 { 354 + int ret; 355 + 356 + if (erratum_handler(set_next_event_virt, ret, evt, clk)) 357 + return ret; 358 + 672 359 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); 673 360 return 0; 674 361 } ··· 647 392 static int arch_timer_set_next_event_phys(unsigned long evt, 648 393 struct clock_event_device *clk) 649 394 { 395 + int ret; 396 + 397 + if (erratum_handler(set_next_event_phys, ret, evt, clk)) 398 + return ret; 399 + 650 400 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 651 401 return 0; 652 402 } ··· 670 410 return 0; 671 411 } 672 412 673 - static void erratum_workaround_set_sne(struct clock_event_device *clk) 674 - { 675 - #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 676 - if (!static_branch_unlikely(&arch_timer_read_ool_enabled)) 677 - return; 678 - 679 - if (arch_timer_uses_ppi == VIRT_PPI) 680 - clk->set_next_event = erratum_set_next_event_virt; 681 - else 682 - clk->set_next_event = erratum_set_next_event_phys; 683 - #endif 684 - } 685 - 686 413 static void __arch_timer_setup(unsigned type, 687 414 struct clock_event_device *clk) 688 415 { 689 416 clk->features = CLOCK_EVT_FEAT_ONESHOT; 690 417 691 - if (type == ARCH_CP15_TIMER) { 418 + if (type == ARCH_TIMER_TYPE_CP15) { 692 419 if (arch_timer_c3stop) 693 420 clk->features |= CLOCK_EVT_FEAT_C3STOP; 694 421 clk->name = "arch_sys_timer"; ··· 683 436 clk->cpumask = cpumask_of(smp_processor_id()); 684 437 clk->irq = arch_timer_ppi[arch_timer_uses_ppi]; 685 438 switch (arch_timer_uses_ppi) { 686 - case VIRT_PPI: 439 + case ARCH_TIMER_VIRT_PPI: 687 440 clk->set_state_shutdown = arch_timer_shutdown_virt; 688 441 
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; 689 442 clk->set_next_event = arch_timer_set_next_event_virt; 690 443 break; 691 - case PHYS_SECURE_PPI: 692 - case PHYS_NONSECURE_PPI: 693 - case HYP_PPI: 444 + case ARCH_TIMER_PHYS_SECURE_PPI: 445 + case ARCH_TIMER_PHYS_NONSECURE_PPI: 446 + case ARCH_TIMER_HYP_PPI: 694 447 clk->set_state_shutdown = arch_timer_shutdown_phys; 695 448 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; 696 449 clk->set_next_event = arch_timer_set_next_event_phys; ··· 699 452 BUG(); 700 453 } 701 454 702 - erratum_workaround_set_sne(clk); 455 + arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); 703 456 } else { 704 457 clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 705 458 clk->name = "arch_mem_timer"; ··· 755 508 { 756 509 u32 cntkctl = arch_timer_get_cntkctl(); 757 510 758 - /* Disable user access to the timers and the physical counter */ 511 + /* Disable user access to the timers and both counters */ 759 512 /* Also disable virtual event stream */ 760 513 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN 761 514 | ARCH_TIMER_USR_VT_ACCESS_EN 515 + | ARCH_TIMER_USR_VCT_ACCESS_EN 762 516 | ARCH_TIMER_VIRT_EVT_EN 763 517 | ARCH_TIMER_USR_PCT_ACCESS_EN); 764 518 765 - /* Enable user access to the virtual counter */ 766 - cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; 519 + /* 520 + * Enable user access to the virtual counter if it doesn't 521 + * need to be workaround. The vdso may have been already 522 + * disabled though. 
523 + */ 524 + if (arch_timer_this_cpu_has_cntvct_wa()) 525 + pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id()); 526 + else 527 + cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; 767 528 768 529 arch_timer_set_cntkctl(cntkctl); 769 530 } 770 531 771 532 static bool arch_timer_has_nonsecure_ppi(void) 772 533 { 773 - return (arch_timer_uses_ppi == PHYS_SECURE_PPI && 774 - arch_timer_ppi[PHYS_NONSECURE_PPI]); 534 + return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI && 535 + arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 775 536 } 776 537 777 538 static u32 check_ppi_trigger(int irq) ··· 800 545 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 801 546 u32 flags; 802 547 803 - __arch_timer_setup(ARCH_CP15_TIMER, clk); 548 + __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk); 804 549 805 550 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); 806 551 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); 807 552 808 553 if (arch_timer_has_nonsecure_ppi()) { 809 - flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]); 810 - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags); 554 + flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 555 + enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 556 + flags); 811 557 } 812 558 813 559 arch_counter_set_user_access(); ··· 818 562 return 0; 819 563 } 820 564 821 - static void 822 - arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) 565 + /* 566 + * For historical reasons, when probing with DT we use whichever (non-zero) 567 + * rate was probed first, and don't verify that others match. If the first node 568 + * probed has a clock-frequency property, this overrides the HW register. 569 + */ 570 + static void arch_timer_of_configure_rate(u32 rate, struct device_node *np) 823 571 { 824 572 /* Who has more than one independent system counter? 
*/ 825 573 if (arch_timer_rate) 826 574 return; 827 575 828 - /* 829 - * Try to determine the frequency from the device tree or CNTFRQ, 830 - * if ACPI is enabled, get the frequency from CNTFRQ ONLY. 831 - */ 832 - if (!acpi_disabled || 833 - of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { 834 - if (cntbase) 835 - arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); 836 - else 837 - arch_timer_rate = arch_timer_get_cntfrq(); 838 - } 576 + if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) 577 + arch_timer_rate = rate; 839 578 840 579 /* Check the timer frequency. */ 841 580 if (arch_timer_rate == 0) 842 - pr_warn("Architected timer frequency not available\n"); 581 + pr_warn("frequency not available\n"); 843 582 } 844 583 845 584 static void arch_timer_banner(unsigned type) 846 585 { 847 - pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 848 - type & ARCH_CP15_TIMER ? "cp15" : "", 849 - type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "", 850 - type & ARCH_MEM_TIMER ? "mmio" : "", 851 - (unsigned long)arch_timer_rate / 1000000, 852 - (unsigned long)(arch_timer_rate / 10000) % 100, 853 - type & ARCH_CP15_TIMER ? 854 - (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" : 586 + pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 587 + type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", 588 + type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? 589 + " and " : "", 590 + type & ARCH_TIMER_TYPE_MEM ? "mmio" : "", 591 + (unsigned long)arch_timer_rate / 1000000, 592 + (unsigned long)(arch_timer_rate / 10000) % 100, 593 + type & ARCH_TIMER_TYPE_CP15 ? 594 + (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" : 855 595 "", 856 - type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "", 857 - type & ARCH_MEM_TIMER ? 596 + type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "", 597 + type & ARCH_TIMER_TYPE_MEM ? 858 598 arch_timer_mem_use_virtual ? 
"virt" : "phys" : 859 599 ""); 860 600 } ··· 873 621 return ((u64) vct_hi << 32) | vct_lo; 874 622 } 875 623 876 - /* 877 - * Default to cp15 based access because arm64 uses this function for 878 - * sched_clock() before DT is probed and the cp15 method is guaranteed 879 - * to exist on arm64. arm doesn't use this before DT is probed so even 880 - * if we don't have the cp15 accessors we won't have a problem. 881 - */ 882 - u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 883 - 884 - static u64 arch_counter_read(struct clocksource *cs) 885 - { 886 - return arch_timer_read_counter(); 887 - } 888 - 889 - static u64 arch_counter_read_cc(const struct cyclecounter *cc) 890 - { 891 - return arch_timer_read_counter(); 892 - } 893 - 894 - static struct clocksource clocksource_counter = { 895 - .name = "arch_sys_counter", 896 - .rating = 400, 897 - .read = arch_counter_read, 898 - .mask = CLOCKSOURCE_MASK(56), 899 - .flags = CLOCK_SOURCE_IS_CONTINUOUS, 900 - }; 901 - 902 - static struct cyclecounter cyclecounter __ro_after_init = { 903 - .read = arch_counter_read_cc, 904 - .mask = CLOCKSOURCE_MASK(56), 905 - }; 906 - 907 624 static struct arch_timer_kvm_info arch_timer_kvm_info; 908 625 909 626 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) ··· 885 664 u64 start_count; 886 665 887 666 /* Register the CP15 based counter if we have one */ 888 - if (type & ARCH_CP15_TIMER) { 889 - if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI) 667 + if (type & ARCH_TIMER_TYPE_CP15) { 668 + if (IS_ENABLED(CONFIG_ARM64) || 669 + arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) 890 670 arch_timer_read_counter = arch_counter_get_cntvct; 891 671 else 892 672 arch_timer_read_counter = arch_counter_get_cntpct; 893 673 894 - clocksource_counter.archdata.vdso_direct = true; 895 - 896 - #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 897 - /* 898 - * Don't use the vdso fastpath if errata require using 899 - * the out-of-line counter accessor. 
900 - */ 901 - if (static_branch_unlikely(&arch_timer_read_ool_enabled)) 902 - clocksource_counter.archdata.vdso_direct = false; 903 - #endif 674 + clocksource_counter.archdata.vdso_direct = vdso_default; 904 675 } else { 905 676 arch_timer_read_counter = arch_counter_get_cntvct_mem; 906 677 } ··· 912 699 913 700 static void arch_timer_stop(struct clock_event_device *clk) 914 701 { 915 - pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 916 - clk->irq, smp_processor_id()); 702 + pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); 917 703 918 704 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]); 919 705 if (arch_timer_has_nonsecure_ppi()) 920 - disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); 706 + disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 921 707 922 708 clk->set_state_shutdown(clk); 923 709 } ··· 930 718 } 931 719 932 720 #ifdef CONFIG_CPU_PM 933 - static unsigned int saved_cntkctl; 721 + static DEFINE_PER_CPU(unsigned long, saved_cntkctl); 934 722 static int arch_timer_cpu_pm_notify(struct notifier_block *self, 935 723 unsigned long action, void *hcpu) 936 724 { 937 725 if (action == CPU_PM_ENTER) 938 - saved_cntkctl = arch_timer_get_cntkctl(); 726 + __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl()); 939 727 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) 940 - arch_timer_set_cntkctl(saved_cntkctl); 728 + arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); 941 729 return NOTIFY_OK; 942 730 } 943 731 ··· 979 767 980 768 ppi = arch_timer_ppi[arch_timer_uses_ppi]; 981 769 switch (arch_timer_uses_ppi) { 982 - case VIRT_PPI: 770 + case ARCH_TIMER_VIRT_PPI: 983 771 err = request_percpu_irq(ppi, arch_timer_handler_virt, 984 772 "arch_timer", arch_timer_evt); 985 773 break; 986 - case PHYS_SECURE_PPI: 987 - case PHYS_NONSECURE_PPI: 774 + case ARCH_TIMER_PHYS_SECURE_PPI: 775 + case ARCH_TIMER_PHYS_NONSECURE_PPI: 988 776 err = request_percpu_irq(ppi, arch_timer_handler_phys, 989 777 
"arch_timer", arch_timer_evt); 990 - if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { 991 - ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; 778 + if (!err && arch_timer_has_nonsecure_ppi()) { 779 + ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; 992 780 err = request_percpu_irq(ppi, arch_timer_handler_phys, 993 781 "arch_timer", arch_timer_evt); 994 782 if (err) 995 - free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 783 + free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI], 996 784 arch_timer_evt); 997 785 } 998 786 break; 999 - case HYP_PPI: 787 + case ARCH_TIMER_HYP_PPI: 1000 788 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1001 789 "arch_timer", arch_timer_evt); 1002 790 break; ··· 1005 793 } 1006 794 1007 795 if (err) { 1008 - pr_err("arch_timer: can't register interrupt %d (%d)\n", 1009 - ppi, err); 796 + pr_err("can't register interrupt %d (%d)\n", ppi, err); 1010 797 goto out_free; 1011 798 } 1012 799 ··· 1028 817 out_unreg_notify: 1029 818 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); 1030 819 if (arch_timer_has_nonsecure_ppi()) 1031 - free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 820 + free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 1032 821 arch_timer_evt); 1033 822 1034 823 out_free: ··· 1049 838 1050 839 t->base = base; 1051 840 t->evt.irq = irq; 1052 - __arch_timer_setup(ARCH_MEM_TIMER, &t->evt); 841 + __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt); 1053 842 1054 843 if (arch_timer_mem_use_virtual) 1055 844 func = arch_timer_handler_virt_mem; ··· 1058 847 1059 848 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); 1060 849 if (ret) { 1061 - pr_err("arch_timer: Failed to request mem timer irq\n"); 850 + pr_err("Failed to request mem timer irq\n"); 1062 851 kfree(t); 1063 852 } 1064 853 ··· 1076 865 {}, 1077 866 }; 1078 867 1079 - static bool __init 1080 - arch_timer_needs_probing(int type, const struct of_device_id *matches) 868 + static bool __init 
arch_timer_needs_of_probing(void) 1081 869 { 1082 870 struct device_node *dn; 1083 871 bool needs_probing = false; 872 + unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM; 1084 873 1085 - dn = of_find_matching_node(NULL, matches); 1086 - if (dn && of_device_is_available(dn) && !(arch_timers_present & type)) 874 + /* We have two timers, and both device-tree nodes are probed. */ 875 + if ((arch_timers_present & mask) == mask) 876 + return false; 877 + 878 + /* 879 + * Only one type of timer is probed, 880 + * check if we have another type of timer node in device-tree. 881 + */ 882 + if (arch_timers_present & ARCH_TIMER_TYPE_CP15) 883 + dn = of_find_matching_node(NULL, arch_timer_mem_of_match); 884 + else 885 + dn = of_find_matching_node(NULL, arch_timer_of_match); 886 + 887 + if (dn && of_device_is_available(dn)) 1087 888 needs_probing = true; 889 + 1088 890 of_node_put(dn); 1089 891 1090 892 return needs_probing; ··· 1105 881 1106 882 static int __init arch_timer_common_init(void) 1107 883 { 1108 - unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; 1109 - 1110 - /* Wait until both nodes are probed if we have two timers */ 1111 - if ((arch_timers_present & mask) != mask) { 1112 - if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match)) 1113 - return 0; 1114 - if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match)) 1115 - return 0; 1116 - } 1117 - 1118 884 arch_timer_banner(arch_timers_present); 1119 885 arch_counter_register(arch_timers_present); 1120 886 return arch_timer_arch_init(); 1121 887 } 1122 888 1123 - static int __init arch_timer_init(void) 889 + /** 890 + * arch_timer_select_ppi() - Select suitable PPI for the current system. 891 + * 892 + * If HYP mode is available, we know that the physical timer 893 + * has been configured to be accessible from PL1. Use it, so 894 + * that a guest can use the virtual timer instead. 895 + * 896 + * On ARMv8.1 with VH extensions, the kernel runs in HYP. 
VHE 897 + * accesses to CNTP_*_EL1 registers are silently redirected to 898 + * their CNTHP_*_EL2 counterparts, and use a different PPI 899 + * number. 900 + * 901 + * If no interrupt provided for virtual timer, we'll have to 902 + * stick to the physical timer. It'd better be accessible... 903 + * For arm64 we never use the secure interrupt. 904 + * 905 + * Return: a suitable PPI type for the current system. 906 + */ 907 + static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) 1124 908 { 1125 - int ret; 1126 - /* 1127 - * If HYP mode is available, we know that the physical timer 1128 - * has been configured to be accessible from PL1. Use it, so 1129 - * that a guest can use the virtual timer instead. 1130 - * 1131 - * If no interrupt provided for virtual timer, we'll have to 1132 - * stick to the physical timer. It'd better be accessible... 1133 - * 1134 - * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE 1135 - * accesses to CNTP_*_EL1 registers are silently redirected to 1136 - * their CNTHP_*_EL2 counterparts, and use a different PPI 1137 - * number. 
1138 - */ 1139 - if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) { 1140 - bool has_ppi; 909 + if (is_kernel_in_hyp_mode()) 910 + return ARCH_TIMER_HYP_PPI; 1141 911 1142 - if (is_kernel_in_hyp_mode()) { 1143 - arch_timer_uses_ppi = HYP_PPI; 1144 - has_ppi = !!arch_timer_ppi[HYP_PPI]; 1145 - } else { 1146 - arch_timer_uses_ppi = PHYS_SECURE_PPI; 1147 - has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] || 1148 - !!arch_timer_ppi[PHYS_NONSECURE_PPI]); 1149 - } 912 + if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI]) 913 + return ARCH_TIMER_VIRT_PPI; 1150 914 1151 - if (!has_ppi) { 1152 - pr_warn("arch_timer: No interrupt available, giving up\n"); 1153 - return -EINVAL; 1154 - } 1155 - } 915 + if (IS_ENABLED(CONFIG_ARM64)) 916 + return ARCH_TIMER_PHYS_NONSECURE_PPI; 1156 917 1157 - ret = arch_timer_register(); 1158 - if (ret) 1159 - return ret; 1160 - 1161 - ret = arch_timer_common_init(); 1162 - if (ret) 1163 - return ret; 1164 - 1165 - arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI]; 1166 - 1167 - return 0; 918 + return ARCH_TIMER_PHYS_SECURE_PPI; 1168 919 } 1169 920 1170 921 static int __init arch_timer_of_init(struct device_node *np) 1171 922 { 1172 - int i; 923 + int i, ret; 924 + u32 rate; 1173 925 1174 - if (arch_timers_present & ARCH_CP15_TIMER) { 1175 - pr_warn("arch_timer: multiple nodes in dt, skipping\n"); 926 + if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 927 + pr_warn("multiple nodes in dt, skipping\n"); 1176 928 return 0; 1177 929 } 1178 930 1179 - arch_timers_present |= ARCH_CP15_TIMER; 1180 - for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) 931 + arch_timers_present |= ARCH_TIMER_TYPE_CP15; 932 + for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) 1181 933 arch_timer_ppi[i] = irq_of_parse_and_map(np, i); 1182 934 1183 - arch_timer_detect_rate(NULL, np); 935 + arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 936 + 937 + rate = arch_timer_get_cntfrq(); 938 + 
arch_timer_of_configure_rate(rate, np); 1184 939 1185 940 arch_timer_c3stop = !of_property_read_bool(np, "always-on"); 1186 941 1187 - #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 1188 - for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) { 1189 - if (of_property_read_bool(np, ool_workarounds[i].id)) { 1190 - timer_unstable_counter_workaround = &ool_workarounds[i]; 1191 - static_branch_enable(&arch_timer_read_ool_enabled); 1192 - pr_info("arch_timer: Enabling workaround for %s\n", 1193 - timer_unstable_counter_workaround->id); 1194 - break; 1195 - } 1196 - } 1197 - #endif 942 + /* Check for globally applicable workarounds */ 943 + arch_timer_check_ool_workaround(ate_match_dt, np); 1198 944 1199 945 /* 1200 946 * If we cannot rely on firmware initializing the timer registers then ··· 1172 978 */ 1173 979 if (IS_ENABLED(CONFIG_ARM) && 1174 980 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) 1175 - arch_timer_uses_ppi = PHYS_SECURE_PPI; 981 + arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI; 982 + else 983 + arch_timer_uses_ppi = arch_timer_select_ppi(); 984 + 985 + if (!arch_timer_ppi[arch_timer_uses_ppi]) { 986 + pr_err("No interrupt available, giving up\n"); 987 + return -EINVAL; 988 + } 1176 989 1177 990 /* On some systems, the counter stops ticking when in suspend. 
*/ 1178 991 arch_counter_suspend_stop = of_property_read_bool(np, 1179 992 "arm,no-tick-in-suspend"); 1180 993 1181 - return arch_timer_init(); 994 + ret = arch_timer_register(); 995 + if (ret) 996 + return ret; 997 + 998 + if (arch_timer_needs_of_probing()) 999 + return 0; 1000 + 1001 + return arch_timer_common_init(); 1182 1002 } 1183 1003 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); 1184 1004 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); 1185 1005 1186 - static int __init arch_timer_mem_init(struct device_node *np) 1006 + static u32 __init 1007 + arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) 1187 1008 { 1188 - struct device_node *frame, *best_frame = NULL; 1189 - void __iomem *cntctlbase, *base; 1190 - unsigned int irq, ret = -EINVAL; 1191 - u32 cnttidr; 1009 + void __iomem *base; 1010 + u32 rate; 1192 1011 1193 - arch_timers_present |= ARCH_MEM_TIMER; 1194 - cntctlbase = of_iomap(np, 0); 1012 + base = ioremap(frame->cntbase, frame->size); 1013 + if (!base) { 1014 + pr_err("Unable to map frame @ %pa\n", &frame->cntbase); 1015 + return 0; 1016 + } 1017 + 1018 + rate = readl_relaxed(frame + CNTFRQ); 1019 + 1020 + iounmap(frame); 1021 + 1022 + return rate; 1023 + } 1024 + 1025 + static struct arch_timer_mem_frame * __init 1026 + arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) 1027 + { 1028 + struct arch_timer_mem_frame *frame, *best_frame = NULL; 1029 + void __iomem *cntctlbase; 1030 + u32 cnttidr; 1031 + int i; 1032 + 1033 + cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size); 1195 1034 if (!cntctlbase) { 1196 - pr_err("arch_timer: Can't find CNTCTLBase\n"); 1197 - return -ENXIO; 1035 + pr_err("Can't map CNTCTLBase @ %pa\n", 1036 + &timer_mem->cntctlbase); 1037 + return NULL; 1198 1038 } 1199 1039 1200 1040 cnttidr = readl_relaxed(cntctlbase + CNTTIDR); ··· 1237 1009 * Try to find a virtual capable frame. 
Otherwise fall back to a 1238 1010 * physical capable frame. 1239 1011 */ 1240 - for_each_available_child_of_node(np, frame) { 1241 - int n; 1242 - u32 cntacr; 1012 + for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1013 + u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | 1014 + CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; 1243 1015 1244 - if (of_property_read_u32(frame, "frame-number", &n)) { 1245 - pr_err("arch_timer: Missing frame-number\n"); 1246 - of_node_put(frame); 1247 - goto out; 1248 - } 1016 + frame = &timer_mem->frame[i]; 1017 + if (!frame->valid) 1018 + continue; 1249 1019 1250 1020 /* Try enabling everything, and see what sticks */ 1251 - cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | 1252 - CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; 1253 - writel_relaxed(cntacr, cntctlbase + CNTACR(n)); 1254 - cntacr = readl_relaxed(cntctlbase + CNTACR(n)); 1021 + writel_relaxed(cntacr, cntctlbase + CNTACR(i)); 1022 + cntacr = readl_relaxed(cntctlbase + CNTACR(i)); 1255 1023 1256 - if ((cnttidr & CNTTIDR_VIRT(n)) && 1024 + if ((cnttidr & CNTTIDR_VIRT(i)) && 1257 1025 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { 1258 - of_node_put(best_frame); 1259 1026 best_frame = frame; 1260 1027 arch_timer_mem_use_virtual = true; 1261 1028 break; ··· 1259 1036 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) 1260 1037 continue; 1261 1038 1262 - of_node_put(best_frame); 1263 - best_frame = of_node_get(frame); 1039 + best_frame = frame; 1264 1040 } 1265 1041 1266 - ret= -ENXIO; 1267 - base = arch_counter_base = of_io_request_and_map(best_frame, 0, 1268 - "arch_mem_timer"); 1269 - if (IS_ERR(base)) { 1270 - pr_err("arch_timer: Can't map frame's registers\n"); 1271 - goto out; 1272 - } 1042 + iounmap(cntctlbase); 1043 + 1044 + if (!best_frame) 1045 + pr_err("Unable to find a suitable frame in timer @ %pa\n", 1046 + &timer_mem->cntctlbase); 1047 + 1048 + return frame; 1049 + } 1050 + 1051 + static int __init 1052 + arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) 1053 + 
{ 1054 + void __iomem *base; 1055 + int ret, irq = 0; 1273 1056 1274 1057 if (arch_timer_mem_use_virtual) 1275 - irq = irq_of_parse_and_map(best_frame, 1); 1058 + irq = frame->virt_irq; 1276 1059 else 1277 - irq = irq_of_parse_and_map(best_frame, 0); 1060 + irq = frame->phys_irq; 1278 1061 1279 - ret = -EINVAL; 1280 1062 if (!irq) { 1281 - pr_err("arch_timer: Frame missing %s irq", 1063 + pr_err("Frame missing %s irq.\n", 1282 1064 arch_timer_mem_use_virtual ? "virt" : "phys"); 1283 - goto out; 1284 - } 1285 - 1286 - arch_timer_detect_rate(base, np); 1287 - ret = arch_timer_mem_register(base, irq); 1288 - if (ret) 1289 - goto out; 1290 - 1291 - return arch_timer_common_init(); 1292 - out: 1293 - iounmap(cntctlbase); 1294 - of_node_put(best_frame); 1295 - return ret; 1296 - } 1297 - CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 1298 - arch_timer_mem_init); 1299 - 1300 - #ifdef CONFIG_ACPI 1301 - static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags) 1302 - { 1303 - int trigger, polarity; 1304 - 1305 - if (!interrupt) 1306 - return 0; 1307 - 1308 - trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE 1309 - : ACPI_LEVEL_SENSITIVE; 1310 - 1311 - polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? 
ACPI_ACTIVE_LOW 1312 - : ACPI_ACTIVE_HIGH; 1313 - 1314 - return acpi_register_gsi(NULL, interrupt, trigger, polarity); 1315 - } 1316 - 1317 - /* Initialize per-processor generic timer */ 1318 - static int __init arch_timer_acpi_init(struct acpi_table_header *table) 1319 - { 1320 - struct acpi_table_gtdt *gtdt; 1321 - 1322 - if (arch_timers_present & ARCH_CP15_TIMER) { 1323 - pr_warn("arch_timer: already initialized, skipping\n"); 1324 1065 return -EINVAL; 1325 1066 } 1326 1067 1327 - gtdt = container_of(table, struct acpi_table_gtdt, header); 1068 + if (!request_mem_region(frame->cntbase, frame->size, 1069 + "arch_mem_timer")) 1070 + return -EBUSY; 1328 1071 1329 - arch_timers_present |= ARCH_CP15_TIMER; 1072 + base = ioremap(frame->cntbase, frame->size); 1073 + if (!base) { 1074 + pr_err("Can't map frame's registers\n"); 1075 + return -ENXIO; 1076 + } 1330 1077 1331 - arch_timer_ppi[PHYS_SECURE_PPI] = 1332 - map_generic_timer_interrupt(gtdt->secure_el1_interrupt, 1333 - gtdt->secure_el1_flags); 1078 + ret = arch_timer_mem_register(base, irq); 1079 + if (ret) { 1080 + iounmap(base); 1081 + return ret; 1082 + } 1334 1083 1335 - arch_timer_ppi[PHYS_NONSECURE_PPI] = 1336 - map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt, 1337 - gtdt->non_secure_el1_flags); 1084 + arch_counter_base = base; 1085 + arch_timers_present |= ARCH_TIMER_TYPE_MEM; 1338 1086 1339 - arch_timer_ppi[VIRT_PPI] = 1340 - map_generic_timer_interrupt(gtdt->virtual_timer_interrupt, 1341 - gtdt->virtual_timer_flags); 1087 + return 0; 1088 + } 1342 1089 1343 - arch_timer_ppi[HYP_PPI] = 1344 - map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt, 1345 - gtdt->non_secure_el2_flags); 1090 + static int __init arch_timer_mem_of_init(struct device_node *np) 1091 + { 1092 + struct arch_timer_mem *timer_mem; 1093 + struct arch_timer_mem_frame *frame; 1094 + struct device_node *frame_node; 1095 + struct resource res; 1096 + int ret = -EINVAL; 1097 + u32 rate; 1346 1098 1347 - /* Get the frequency 
from CNTFRQ */ 1348 - arch_timer_detect_rate(NULL, NULL); 1099 + timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL); 1100 + if (!timer_mem) 1101 + return -ENOMEM; 1102 + 1103 + if (of_address_to_resource(np, 0, &res)) 1104 + goto out; 1105 + timer_mem->cntctlbase = res.start; 1106 + timer_mem->size = resource_size(&res); 1107 + 1108 + for_each_available_child_of_node(np, frame_node) { 1109 + u32 n; 1110 + struct arch_timer_mem_frame *frame; 1111 + 1112 + if (of_property_read_u32(frame_node, "frame-number", &n)) { 1113 + pr_err(FW_BUG "Missing frame-number.\n"); 1114 + of_node_put(frame_node); 1115 + goto out; 1116 + } 1117 + if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { 1118 + pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", 1119 + ARCH_TIMER_MEM_MAX_FRAMES - 1); 1120 + of_node_put(frame_node); 1121 + goto out; 1122 + } 1123 + frame = &timer_mem->frame[n]; 1124 + 1125 + if (frame->valid) { 1126 + pr_err(FW_BUG "Duplicated frame-number.\n"); 1127 + of_node_put(frame_node); 1128 + goto out; 1129 + } 1130 + 1131 + if (of_address_to_resource(frame_node, 0, &res)) { 1132 + of_node_put(frame_node); 1133 + goto out; 1134 + } 1135 + frame->cntbase = res.start; 1136 + frame->size = resource_size(&res); 1137 + 1138 + frame->virt_irq = irq_of_parse_and_map(frame_node, 1139 + ARCH_TIMER_VIRT_SPI); 1140 + frame->phys_irq = irq_of_parse_and_map(frame_node, 1141 + ARCH_TIMER_PHYS_SPI); 1142 + 1143 + frame->valid = true; 1144 + } 1145 + 1146 + frame = arch_timer_mem_find_best_frame(timer_mem); 1147 + if (!frame) { 1148 + ret = -EINVAL; 1149 + goto out; 1150 + } 1151 + 1152 + rate = arch_timer_mem_frame_get_cntfrq(frame); 1153 + arch_timer_of_configure_rate(rate, np); 1154 + 1155 + ret = arch_timer_mem_frame_register(frame); 1156 + if (!ret && !arch_timer_needs_of_probing()) 1157 + ret = arch_timer_common_init(); 1158 + out: 1159 + kfree(timer_mem); 1160 + return ret; 1161 + } 1162 + CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 1163 + 
arch_timer_mem_of_init); 1164 + 1165 + #ifdef CONFIG_ACPI_GTDT 1166 + static int __init 1167 + arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) 1168 + { 1169 + struct arch_timer_mem_frame *frame; 1170 + u32 rate; 1171 + int i; 1172 + 1173 + for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1174 + frame = &timer_mem->frame[i]; 1175 + 1176 + if (!frame->valid) 1177 + continue; 1178 + 1179 + rate = arch_timer_mem_frame_get_cntfrq(frame); 1180 + if (rate == arch_timer_rate) 1181 + continue; 1182 + 1183 + pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n", 1184 + &frame->cntbase, 1185 + (unsigned long)rate, (unsigned long)arch_timer_rate); 1186 + 1187 + return -EINVAL; 1188 + } 1189 + 1190 + return 0; 1191 + } 1192 + 1193 + static int __init arch_timer_mem_acpi_init(int platform_timer_count) 1194 + { 1195 + struct arch_timer_mem *timers, *timer; 1196 + struct arch_timer_mem_frame *frame; 1197 + int timer_count, i, ret = 0; 1198 + 1199 + timers = kcalloc(platform_timer_count, sizeof(*timers), 1200 + GFP_KERNEL); 1201 + if (!timers) 1202 + return -ENOMEM; 1203 + 1204 + ret = acpi_arch_timer_mem_init(timers, &timer_count); 1205 + if (ret || !timer_count) 1206 + goto out; 1207 + 1208 + for (i = 0; i < timer_count; i++) { 1209 + ret = arch_timer_mem_verify_cntfrq(&timers[i]); 1210 + if (ret) { 1211 + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1212 + goto out; 1213 + } 1214 + } 1215 + 1216 + /* 1217 + * While unlikely, it's theoretically possible that none of the frames 1218 + * in a timer expose the combination of feature we want. 
1219 + */ 1220 + for (i = i; i < timer_count; i++) { 1221 + timer = &timers[i]; 1222 + 1223 + frame = arch_timer_mem_find_best_frame(timer); 1224 + if (frame) 1225 + break; 1226 + } 1227 + 1228 + if (frame) 1229 + ret = arch_timer_mem_frame_register(frame); 1230 + out: 1231 + kfree(timers); 1232 + return ret; 1233 + } 1234 + 1235 + /* Initialize per-processor generic timer and memory-mapped timer(if present) */ 1236 + static int __init arch_timer_acpi_init(struct acpi_table_header *table) 1237 + { 1238 + int ret, platform_timer_count; 1239 + 1240 + if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1241 + pr_warn("already initialized, skipping\n"); 1242 + return -EINVAL; 1243 + } 1244 + 1245 + arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1246 + 1247 + ret = acpi_gtdt_init(table, &platform_timer_count); 1248 + if (ret) { 1249 + pr_err("Failed to init GTDT table.\n"); 1250 + return ret; 1251 + } 1252 + 1253 + arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] = 1254 + acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI); 1255 + 1256 + arch_timer_ppi[ARCH_TIMER_VIRT_PPI] = 1257 + acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI); 1258 + 1259 + arch_timer_ppi[ARCH_TIMER_HYP_PPI] = 1260 + acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); 1261 + 1262 + arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 1263 + 1264 + /* 1265 + * When probing via ACPI, we have no mechanism to override the sysreg 1266 + * CNTFRQ value. This *must* be correct. 
1267 + */ 1268 + arch_timer_rate = arch_timer_get_cntfrq(); 1269 + if (!arch_timer_rate) { 1270 + pr_err(FW_BUG "frequency not available.\n"); 1271 + return -EINVAL; 1272 + } 1273 + 1274 + arch_timer_uses_ppi = arch_timer_select_ppi(); 1275 + if (!arch_timer_ppi[arch_timer_uses_ppi]) { 1276 + pr_err("No interrupt available, giving up\n"); 1277 + return -EINVAL; 1278 + } 1349 1279 1350 1280 /* Always-on capability */ 1351 - arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON); 1281 + arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi); 1352 1282 1353 - arch_timer_init(); 1354 - return 0; 1283 + /* Check for globally applicable workarounds */ 1284 + arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table); 1285 + 1286 + ret = arch_timer_register(); 1287 + if (ret) 1288 + return ret; 1289 + 1290 + if (platform_timer_count && 1291 + arch_timer_mem_acpi_init(platform_timer_count)) 1292 + pr_err("Failed to initialize memory-mapped timer.\n"); 1293 + 1294 + return arch_timer_common_init(); 1355 1295 } 1356 1296 CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); 1357 1297 #endif
+1 -1
drivers/clocksource/asm9260_timer.c
··· 193 193 194 194 priv.base = of_io_request_and_map(np, 0, np->name); 195 195 if (IS_ERR(priv.base)) { 196 - pr_err("%s: unable to map resource", np->name); 196 + pr_err("%s: unable to map resource\n", np->name); 197 197 return PTR_ERR(priv.base); 198 198 } 199 199
+3 -3
drivers/clocksource/bcm2835_timer.c
··· 89 89 90 90 base = of_iomap(node, 0); 91 91 if (!base) { 92 - pr_err("Can't remap registers"); 92 + pr_err("Can't remap registers\n"); 93 93 return -ENXIO; 94 94 } 95 95 96 96 ret = of_property_read_u32(node, "clock-frequency", &freq); 97 97 if (ret) { 98 - pr_err("Can't read clock-frequency"); 98 + pr_err("Can't read clock-frequency\n"); 99 99 goto err_iounmap; 100 100 } 101 101 ··· 107 107 108 108 irq = irq_of_parse_and_map(node, DEFAULT_TIMER); 109 109 if (irq <= 0) { 110 - pr_err("Can't parse IRQ"); 110 + pr_err("Can't parse IRQ\n"); 111 111 ret = -EINVAL; 112 112 goto err_iounmap; 113 113 }
+1 -1
drivers/clocksource/bcm_kona_timer.c
··· 179 179 } else if (!of_property_read_u32(node, "clock-frequency", &freq)) { 180 180 arch_timer_rate = freq; 181 181 } else { 182 - pr_err("Kona Timer v1 unable to determine clock-frequency"); 182 + pr_err("Kona Timer v1 unable to determine clock-frequency\n"); 183 183 return -EINVAL; 184 184 } 185 185
+1 -1
drivers/clocksource/clksrc-probe.c
··· 40 40 41 41 ret = init_func_ret(np); 42 42 if (ret) { 43 - pr_err("Failed to initialize '%s': %d", 43 + pr_err("Failed to initialize '%s': %d\n", 44 44 of_node_full_name(np), ret); 45 45 continue; 46 46 }
+3 -1
drivers/clocksource/dw_apb_timer.c
··· 101 101 struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); 102 102 103 103 if (!evt->event_handler) { 104 - pr_info("Spurious APBT timer interrupt %d", irq); 104 + pr_info("Spurious APBT timer interrupt %d\n", irq); 105 105 return IRQ_NONE; 106 106 } 107 107 ··· 257 257 clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD); 258 258 dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff, 259 259 &dw_ced->ced); 260 + dw_ced->ced.max_delta_ticks = 0x7fffffff; 260 261 dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); 262 + dw_ced->ced.min_delta_ticks = 5000; 261 263 dw_ced->ced.cpumask = cpumask_of(cpu); 262 264 dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | 263 265 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
+26 -20
drivers/clocksource/em_sti.c
··· 78 78 int ret; 79 79 80 80 /* enable clock */ 81 - ret = clk_prepare_enable(p->clk); 81 + ret = clk_enable(p->clk); 82 82 if (ret) { 83 83 dev_err(&p->pdev->dev, "cannot enable clock\n"); 84 84 return ret; 85 85 } 86 - 87 - /* configure channel, periodic mode and maximum timeout */ 88 - p->rate = clk_get_rate(p->clk); 89 86 90 87 /* reset the counter */ 91 88 em_sti_write(p, STI_SET_H, 0x40000000); ··· 104 107 em_sti_write(p, STI_INTENCLR, 3); 105 108 106 109 /* stop clock */ 107 - clk_disable_unprepare(p->clk); 110 + clk_disable(p->clk); 108 111 } 109 112 110 113 static u64 em_sti_count(struct em_sti_priv *p) ··· 202 205 203 206 static int em_sti_clocksource_enable(struct clocksource *cs) 204 207 { 205 - int ret; 206 208 struct em_sti_priv *p = cs_to_em_sti(cs); 207 209 208 - ret = em_sti_start(p, USER_CLOCKSOURCE); 209 - if (!ret) 210 - __clocksource_update_freq_hz(cs, p->rate); 211 - return ret; 210 + return em_sti_start(p, USER_CLOCKSOURCE); 212 211 } 213 212 214 213 static void em_sti_clocksource_disable(struct clocksource *cs) ··· 233 240 234 241 dev_info(&p->pdev->dev, "used as clock source\n"); 235 242 236 - /* Register with dummy 1 Hz value, gets updated in ->enable() */ 237 - clocksource_register_hz(cs, 1); 243 + clocksource_register_hz(cs, p->rate); 238 244 return 0; 239 245 } 240 246 ··· 255 263 256 264 dev_info(&p->pdev->dev, "used for oneshot clock events\n"); 257 265 em_sti_start(p, USER_CLOCKEVENT); 258 - clockevents_config(&p->ced, p->rate); 259 266 return 0; 260 267 } 261 268 ··· 285 294 286 295 dev_info(&p->pdev->dev, "used for clock events\n"); 287 296 288 - /* Register with dummy 1 Hz value, gets updated in ->set_state_oneshot() */ 289 - clockevents_config_and_register(ced, 1, 2, 0xffffffff); 297 + clockevents_config_and_register(ced, p->rate, 2, 0xffffffff); 290 298 } 291 299 292 300 static int em_sti_probe(struct platform_device *pdev) ··· 293 303 struct em_sti_priv *p; 294 304 struct resource *res; 295 305 int irq; 306 + int ret; 296 307 
297 308 p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); 298 309 if (p == NULL) ··· 314 323 if (IS_ERR(p->base)) 315 324 return PTR_ERR(p->base); 316 325 326 + if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, 327 + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, 328 + dev_name(&pdev->dev), p)) { 329 + dev_err(&pdev->dev, "failed to request low IRQ\n"); 330 + return -ENOENT; 331 + } 332 + 317 333 /* get hold of clock */ 318 334 p->clk = devm_clk_get(&pdev->dev, "sclk"); 319 335 if (IS_ERR(p->clk)) { ··· 328 330 return PTR_ERR(p->clk); 329 331 } 330 332 331 - if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, 332 - IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, 333 - dev_name(&pdev->dev), p)) { 334 - dev_err(&pdev->dev, "failed to request low IRQ\n"); 335 - return -ENOENT; 333 + ret = clk_prepare(p->clk); 334 + if (ret < 0) { 335 + dev_err(&pdev->dev, "cannot prepare clock\n"); 336 + return ret; 336 337 } 338 + 339 + ret = clk_enable(p->clk); 340 + if (ret < 0) { 341 + dev_err(&p->pdev->dev, "cannot enable clock\n"); 342 + clk_unprepare(p->clk); 343 + return ret; 344 + } 345 + p->rate = clk_get_rate(p->clk); 346 + clk_disable(p->clk); 337 347 338 348 raw_spin_lock_init(&p->lock); 339 349 em_sti_register_clockevent(p);
-8
drivers/clocksource/h8300_timer8.c
··· 101 101 102 102 static void timer8_clock_event_start(struct timer8_priv *p, unsigned long delta) 103 103 { 104 - struct clock_event_device *ced = &p->ced; 105 - 106 104 timer8_start(p); 107 - 108 - ced->shift = 32; 109 - ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); 110 - ced->max_delta_ns = clockevent_delta2ns(0xffff, ced); 111 - ced->min_delta_ns = clockevent_delta2ns(0x0001, ced); 112 - 113 105 timer8_set_next(p, delta); 114 106 } 115 107
+2 -2
drivers/clocksource/meson6_timer.c
··· 133 133 134 134 timer_base = of_io_request_and_map(node, 0, "meson6-timer"); 135 135 if (IS_ERR(timer_base)) { 136 - pr_err("Can't map registers"); 136 + pr_err("Can't map registers\n"); 137 137 return -ENXIO; 138 138 } 139 139 140 140 irq = irq_of_parse_and_map(node, 0); 141 141 if (irq <= 0) { 142 - pr_err("Can't parse IRQ"); 142 + pr_err("Can't parse IRQ\n"); 143 143 return -EINVAL; 144 144 } 145 145
+2
drivers/clocksource/metag_generic.c
··· 114 114 115 115 clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift); 116 116 clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk); 117 + clk->max_delta_ticks = 0x7fffffff; 117 118 clk->min_delta_ns = clockevent_delta2ns(0xf, clk); 119 + clk->min_delta_ticks = 0xf; 118 120 clk->cpumask = cpumask_of(cpu); 119 121 120 122 clockevents_register_device(clk);
+1 -14
drivers/clocksource/mips-gic-timer.c
··· 154 154 return ret; 155 155 } 156 156 157 - void __init gic_clocksource_init(unsigned int frequency) 158 - { 159 - gic_frequency = frequency; 160 - gic_timer_irq = MIPS_GIC_IRQ_BASE + 161 - GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE); 162 - 163 - __gic_clocksource_init(); 164 - gic_clockevent_init(); 165 - 166 - /* And finally start the counter */ 167 - gic_start_count(); 168 - } 169 - 170 157 static int __init gic_clocksource_of_init(struct device_node *node) 171 158 { 172 159 struct clk *clk; ··· 161 174 162 175 if (!gic_present || !node->parent || 163 176 !of_device_is_compatible(node->parent, "mti,gic")) { 164 - pr_warn("No DT definition for the mips gic driver"); 177 + pr_warn("No DT definition for the mips gic driver\n"); 165 178 return -ENXIO; 166 179 } 167 180
+4 -4
drivers/clocksource/nomadik-mtu.c
··· 260 260 261 261 base = of_iomap(node, 0); 262 262 if (!base) { 263 - pr_err("Can't remap registers"); 263 + pr_err("Can't remap registers\n"); 264 264 return -ENXIO; 265 265 } 266 266 267 267 pclk = of_clk_get_by_name(node, "apb_pclk"); 268 268 if (IS_ERR(pclk)) { 269 - pr_err("could not get apb_pclk"); 269 + pr_err("could not get apb_pclk\n"); 270 270 return PTR_ERR(pclk); 271 271 } 272 272 273 273 clk = of_clk_get_by_name(node, "timclk"); 274 274 if (IS_ERR(clk)) { 275 - pr_err("could not get timclk"); 275 + pr_err("could not get timclk\n"); 276 276 return PTR_ERR(clk); 277 277 } 278 278 279 279 irq = irq_of_parse_and_map(node, 0); 280 280 if (irq <= 0) { 281 - pr_err("Can't parse IRQ"); 281 + pr_err("Can't parse IRQ\n"); 282 282 return -EINVAL; 283 283 } 284 284
+2
drivers/clocksource/numachip.c
··· 51 51 .mult = 1, 52 52 .shift = 0, 53 53 .min_delta_ns = 1250, 54 + .min_delta_ticks = 1250, 54 55 .max_delta_ns = LONG_MAX, 56 + .max_delta_ticks = LONG_MAX, 55 57 }; 56 58 57 59 static void numachip_timer_interrupt(void)
+3 -3
drivers/clocksource/pxa_timer.c
··· 166 166 167 167 ret = setup_irq(irq, &pxa_ost0_irq); 168 168 if (ret) { 169 - pr_err("Failed to setup irq"); 169 + pr_err("Failed to setup irq\n"); 170 170 return ret; 171 171 } 172 172 173 173 ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, 174 174 32, clocksource_mmio_readl_up); 175 175 if (ret) { 176 - pr_err("Failed to init clocksource"); 176 + pr_err("Failed to init clocksource\n"); 177 177 return ret; 178 178 } 179 179 ··· 203 203 204 204 ret = clk_prepare_enable(clk); 205 205 if (ret) { 206 - pr_crit("Failed to prepare clock"); 206 + pr_crit("Failed to prepare clock\n"); 207 207 return ret; 208 208 } 209 209
+166 -70
drivers/clocksource/rockchip_timer.c
··· 11 11 #include <linux/clockchips.h> 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/sched_clock.h> 15 + #include <linux/slab.h> 14 16 #include <linux/of.h> 15 17 #include <linux/of_address.h> 16 18 #include <linux/of_irq.h> ··· 21 19 22 20 #define TIMER_LOAD_COUNT0 0x00 23 21 #define TIMER_LOAD_COUNT1 0x04 22 + #define TIMER_CURRENT_VALUE0 0x08 23 + #define TIMER_CURRENT_VALUE1 0x0C 24 24 #define TIMER_CONTROL_REG3288 0x10 25 25 #define TIMER_CONTROL_REG3399 0x1c 26 26 #define TIMER_INT_STATUS 0x18 ··· 33 29 #define TIMER_MODE_USER_DEFINED_COUNT (1 << 1) 34 30 #define TIMER_INT_UNMASK (1 << 2) 35 31 36 - struct bc_timer { 37 - struct clock_event_device ce; 32 + struct rk_timer { 38 33 void __iomem *base; 39 34 void __iomem *ctrl; 35 + struct clk *clk; 36 + struct clk *pclk; 40 37 u32 freq; 38 + int irq; 41 39 }; 42 40 43 - static struct bc_timer bc_timer; 41 + struct rk_clkevt { 42 + struct clock_event_device ce; 43 + struct rk_timer timer; 44 + }; 44 45 45 - static inline struct bc_timer *rk_timer(struct clock_event_device *ce) 46 + static struct rk_clkevt *rk_clkevt; 47 + static struct rk_timer *rk_clksrc; 48 + 49 + static inline struct rk_timer *rk_timer(struct clock_event_device *ce) 46 50 { 47 - return container_of(ce, struct bc_timer, ce); 51 + return &container_of(ce, struct rk_clkevt, ce)->timer; 48 52 } 49 53 50 - static inline void __iomem *rk_base(struct clock_event_device *ce) 54 + static inline void rk_timer_disable(struct rk_timer *timer) 51 55 { 52 - return rk_timer(ce)->base; 56 + writel_relaxed(TIMER_DISABLE, timer->ctrl); 53 57 } 54 58 55 - static inline void __iomem *rk_ctrl(struct clock_event_device *ce) 59 + static inline void rk_timer_enable(struct rk_timer *timer, u32 flags) 56 60 { 57 - return rk_timer(ce)->ctrl; 58 - } 59 - 60 - static inline void rk_timer_disable(struct clock_event_device *ce) 61 - { 62 - writel_relaxed(TIMER_DISABLE, rk_ctrl(ce)); 63 - } 64 - 65 - static inline void 
rk_timer_enable(struct clock_event_device *ce, u32 flags) 66 - { 67 - writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags, 68 - rk_ctrl(ce)); 61 + writel_relaxed(TIMER_ENABLE | flags, timer->ctrl); 69 62 } 70 63 71 64 static void rk_timer_update_counter(unsigned long cycles, 72 - struct clock_event_device *ce) 65 + struct rk_timer *timer) 73 66 { 74 - writel_relaxed(cycles, rk_base(ce) + TIMER_LOAD_COUNT0); 75 - writel_relaxed(0, rk_base(ce) + TIMER_LOAD_COUNT1); 67 + writel_relaxed(cycles, timer->base + TIMER_LOAD_COUNT0); 68 + writel_relaxed(0, timer->base + TIMER_LOAD_COUNT1); 76 69 } 77 70 78 - static void rk_timer_interrupt_clear(struct clock_event_device *ce) 71 + static void rk_timer_interrupt_clear(struct rk_timer *timer) 79 72 { 80 - writel_relaxed(1, rk_base(ce) + TIMER_INT_STATUS); 73 + writel_relaxed(1, timer->base + TIMER_INT_STATUS); 81 74 } 82 75 83 76 static inline int rk_timer_set_next_event(unsigned long cycles, 84 77 struct clock_event_device *ce) 85 78 { 86 - rk_timer_disable(ce); 87 - rk_timer_update_counter(cycles, ce); 88 - rk_timer_enable(ce, TIMER_MODE_USER_DEFINED_COUNT); 79 + struct rk_timer *timer = rk_timer(ce); 80 + 81 + rk_timer_disable(timer); 82 + rk_timer_update_counter(cycles, timer); 83 + rk_timer_enable(timer, TIMER_MODE_USER_DEFINED_COUNT | 84 + TIMER_INT_UNMASK); 89 85 return 0; 90 86 } 91 87 92 88 static int rk_timer_shutdown(struct clock_event_device *ce) 93 89 { 94 - rk_timer_disable(ce); 90 + struct rk_timer *timer = rk_timer(ce); 91 + 92 + rk_timer_disable(timer); 95 93 return 0; 96 94 } 97 95 98 96 static int rk_timer_set_periodic(struct clock_event_device *ce) 99 97 { 100 - rk_timer_disable(ce); 101 - rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce); 102 - rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING); 98 + struct rk_timer *timer = rk_timer(ce); 99 + 100 + rk_timer_disable(timer); 101 + rk_timer_update_counter(timer->freq / HZ - 1, timer); 102 + rk_timer_enable(timer, TIMER_MODE_FREE_RUNNING | 
TIMER_INT_UNMASK); 103 103 return 0; 104 104 } 105 105 106 106 static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) 107 107 { 108 108 struct clock_event_device *ce = dev_id; 109 + struct rk_timer *timer = rk_timer(ce); 109 110 110 - rk_timer_interrupt_clear(ce); 111 + rk_timer_interrupt_clear(timer); 111 112 112 113 if (clockevent_state_oneshot(ce)) 113 - rk_timer_disable(ce); 114 + rk_timer_disable(timer); 114 115 115 116 ce->event_handler(ce); 116 117 117 118 return IRQ_HANDLED; 118 119 } 119 120 120 - static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg) 121 + static u64 notrace rk_timer_sched_read(void) 121 122 { 122 - struct clock_event_device *ce = &bc_timer.ce; 123 + return ~readl_relaxed(rk_clksrc->base + TIMER_CURRENT_VALUE0); 124 + } 125 + 126 + static int __init 127 + rk_timer_probe(struct rk_timer *timer, struct device_node *np) 128 + { 123 129 struct clk *timer_clk; 124 130 struct clk *pclk; 125 131 int ret = -EINVAL, irq; 132 + u32 ctrl_reg = TIMER_CONTROL_REG3288; 126 133 127 - bc_timer.base = of_iomap(np, 0); 128 - if (!bc_timer.base) { 134 + timer->base = of_iomap(np, 0); 135 + if (!timer->base) { 129 136 pr_err("Failed to get base address for '%s'\n", TIMER_NAME); 130 137 return -ENXIO; 131 138 } 132 - bc_timer.ctrl = bc_timer.base + ctrl_reg; 139 + 140 + if (of_device_is_compatible(np, "rockchip,rk3399-timer")) 141 + ctrl_reg = TIMER_CONTROL_REG3399; 142 + 143 + timer->ctrl = timer->base + ctrl_reg; 133 144 134 145 pclk = of_clk_get_by_name(np, "pclk"); 135 146 if (IS_ERR(pclk)) { ··· 158 139 pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); 159 140 goto out_unmap; 160 141 } 142 + timer->pclk = pclk; 161 143 162 144 timer_clk = of_clk_get_by_name(np, "timer"); 163 145 if (IS_ERR(timer_clk)) { ··· 172 152 pr_err("Failed to enable timer clock\n"); 173 153 goto out_timer_clk; 174 154 } 155 + timer->clk = timer_clk; 175 156 176 - bc_timer.freq = clk_get_rate(timer_clk); 157 + timer->freq = clk_get_rate(timer_clk); 177 
158 178 159 irq = irq_of_parse_and_map(np, 0); 179 160 if (!irq) { ··· 182 161 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); 183 162 goto out_irq; 184 163 } 164 + timer->irq = irq; 185 165 186 - ce->name = TIMER_NAME; 187 - ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 188 - CLOCK_EVT_FEAT_DYNIRQ; 189 - ce->set_next_event = rk_timer_set_next_event; 190 - ce->set_state_shutdown = rk_timer_shutdown; 191 - ce->set_state_periodic = rk_timer_set_periodic; 192 - ce->irq = irq; 193 - ce->cpumask = cpu_possible_mask; 194 - ce->rating = 250; 195 - 196 - rk_timer_interrupt_clear(ce); 197 - rk_timer_disable(ce); 198 - 199 - ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce); 200 - if (ret) { 201 - pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret); 202 - goto out_irq; 203 - } 204 - 205 - clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); 206 - 166 + rk_timer_interrupt_clear(timer); 167 + rk_timer_disable(timer); 207 168 return 0; 208 169 209 170 out_irq: ··· 193 190 out_timer_clk: 194 191 clk_disable_unprepare(pclk); 195 192 out_unmap: 196 - iounmap(bc_timer.base); 193 + iounmap(timer->base); 197 194 198 195 return ret; 199 196 } 200 197 201 - static int __init rk3288_timer_init(struct device_node *np) 198 + static void __init rk_timer_cleanup(struct rk_timer *timer) 202 199 { 203 - return rk_timer_init(np, TIMER_CONTROL_REG3288); 200 + clk_disable_unprepare(timer->clk); 201 + clk_disable_unprepare(timer->pclk); 202 + iounmap(timer->base); 204 203 } 205 204 206 - static int __init rk3399_timer_init(struct device_node *np) 205 + static int __init rk_clkevt_init(struct device_node *np) 207 206 { 208 - return rk_timer_init(np, TIMER_CONTROL_REG3399); 207 + struct clock_event_device *ce; 208 + int ret = -EINVAL; 209 + 210 + rk_clkevt = kzalloc(sizeof(struct rk_clkevt), GFP_KERNEL); 211 + if (!rk_clkevt) { 212 + ret = -ENOMEM; 213 + goto out; 214 + } 215 + 216 + ret = rk_timer_probe(&rk_clkevt->timer, np); 
217 + if (ret) 218 + goto out_probe; 219 + 220 + ce = &rk_clkevt->ce; 221 + ce->name = TIMER_NAME; 222 + ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 223 + CLOCK_EVT_FEAT_DYNIRQ; 224 + ce->set_next_event = rk_timer_set_next_event; 225 + ce->set_state_shutdown = rk_timer_shutdown; 226 + ce->set_state_periodic = rk_timer_set_periodic; 227 + ce->irq = rk_clkevt->timer.irq; 228 + ce->cpumask = cpu_possible_mask; 229 + ce->rating = 250; 230 + 231 + ret = request_irq(rk_clkevt->timer.irq, rk_timer_interrupt, IRQF_TIMER, 232 + TIMER_NAME, ce); 233 + if (ret) { 234 + pr_err("Failed to initialize '%s': %d\n", 235 + TIMER_NAME, ret); 236 + goto out_irq; 237 + } 238 + 239 + clockevents_config_and_register(&rk_clkevt->ce, 240 + rk_clkevt->timer.freq, 1, UINT_MAX); 241 + return 0; 242 + 243 + out_irq: 244 + rk_timer_cleanup(&rk_clkevt->timer); 245 + out_probe: 246 + kfree(rk_clkevt); 247 + out: 248 + /* Leave rk_clkevt not NULL to prevent future init */ 249 + rk_clkevt = ERR_PTR(ret); 250 + return ret; 209 251 } 210 252 211 - CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", 212 - rk3288_timer_init); 213 - CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", 214 - rk3399_timer_init); 253 + static int __init rk_clksrc_init(struct device_node *np) 254 + { 255 + int ret = -EINVAL; 256 + 257 + rk_clksrc = kzalloc(sizeof(struct rk_timer), GFP_KERNEL); 258 + if (!rk_clksrc) { 259 + ret = -ENOMEM; 260 + goto out; 261 + } 262 + 263 + ret = rk_timer_probe(rk_clksrc, np); 264 + if (ret) 265 + goto out_probe; 266 + 267 + rk_timer_update_counter(UINT_MAX, rk_clksrc); 268 + rk_timer_enable(rk_clksrc, 0); 269 + 270 + ret = clocksource_mmio_init(rk_clksrc->base + TIMER_CURRENT_VALUE0, 271 + TIMER_NAME, rk_clksrc->freq, 250, 32, 272 + clocksource_mmio_readl_down); 273 + if (ret) { 274 + pr_err("Failed to register clocksource"); 275 + goto out_clocksource; 276 + } 277 + 278 + sched_clock_register(rk_timer_sched_read, 32, rk_clksrc->freq); 279 + return 
0; 280 + 281 + out_clocksource: 282 + rk_timer_cleanup(rk_clksrc); 283 + out_probe: 284 + kfree(rk_clksrc); 285 + out: 286 + /* Leave rk_clksrc not NULL to prevent future init */ 287 + rk_clksrc = ERR_PTR(ret); 288 + return ret; 289 + } 290 + 291 + static int __init rk_timer_init(struct device_node *np) 292 + { 293 + if (!rk_clkevt) 294 + return rk_clkevt_init(np); 295 + 296 + if (!rk_clksrc) 297 + return rk_clksrc_init(np); 298 + 299 + pr_err("Too many timer definitions for '%s'\n", TIMER_NAME); 300 + return -EINVAL; 301 + } 302 + 303 + CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init); 304 + CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init);
+3 -3
drivers/clocksource/samsung_pwm_timer.c
··· 385 385 mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1); 386 386 channel = fls(mask) - 1; 387 387 if (channel < 0) { 388 - pr_crit("failed to find PWM channel for clocksource"); 388 + pr_crit("failed to find PWM channel for clocksource\n"); 389 389 return -EINVAL; 390 390 } 391 391 pwm.source_id = channel; ··· 393 393 mask &= ~(1 << channel); 394 394 channel = fls(mask) - 1; 395 395 if (channel < 0) { 396 - pr_crit("failed to find PWM channel for clock event"); 396 + pr_crit("failed to find PWM channel for clock event\n"); 397 397 return -EINVAL; 398 398 } 399 399 pwm.event_id = channel; ··· 448 448 449 449 pwm.timerclk = of_clk_get_by_name(np, "timers"); 450 450 if (IS_ERR(pwm.timerclk)) { 451 - pr_crit("failed to get timers clock for timer"); 451 + pr_crit("failed to get timers clock for timer\n"); 452 452 return PTR_ERR(pwm.timerclk); 453 453 } 454 454
+27 -20
drivers/clocksource/sh_cmt.c
··· 103 103 unsigned long match_value; 104 104 unsigned long next_match_value; 105 105 unsigned long max_match_value; 106 - unsigned long rate; 107 106 raw_spinlock_t lock; 108 107 struct clock_event_device ced; 109 108 struct clocksource cs; ··· 117 118 118 119 void __iomem *mapbase; 119 120 struct clk *clk; 121 + unsigned long rate; 120 122 121 123 raw_spinlock_t lock; /* Protect the shared start/stop register */ 122 124 ··· 320 320 raw_spin_unlock_irqrestore(&ch->cmt->lock, flags); 321 321 } 322 322 323 - static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) 323 + static int sh_cmt_enable(struct sh_cmt_channel *ch) 324 324 { 325 325 int k, ret; 326 326 ··· 340 340 341 341 /* configure channel, periodic mode and maximum timeout */ 342 342 if (ch->cmt->info->width == 16) { 343 - *rate = clk_get_rate(ch->cmt->clk) / 512; 344 343 sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | 345 344 SH_CMT16_CMCSR_CKS512); 346 345 } else { 347 - *rate = clk_get_rate(ch->cmt->clk) / 8; 348 346 sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | 349 347 SH_CMT32_CMCSR_CMTOUT_IE | 350 348 SH_CMT32_CMCSR_CMR_IRQ | ··· 570 572 raw_spin_lock_irqsave(&ch->lock, flags); 571 573 572 574 if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 573 - ret = sh_cmt_enable(ch, &ch->rate); 575 + ret = sh_cmt_enable(ch); 574 576 575 577 if (ret) 576 578 goto out; ··· 638 640 ch->total_cycles = 0; 639 641 640 642 ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE); 641 - if (!ret) { 642 - __clocksource_update_freq_hz(cs, ch->rate); 643 + if (!ret) 643 644 ch->cs_enabled = true; 644 - } 645 + 645 646 return ret; 646 647 } 647 648 ··· 694 697 dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", 695 698 ch->index); 696 699 697 - /* Register with dummy 1 Hz value, gets updated in ->enable() */ 698 - clocksource_register_hz(cs, 1); 700 + clocksource_register_hz(cs, ch->cmt->rate); 699 701 return 0; 700 702 } 701 703 ··· 705 709 706 710 static void sh_cmt_clock_event_start(struct sh_cmt_channel 
*ch, int periodic) 707 711 { 708 - struct clock_event_device *ced = &ch->ced; 709 - 710 712 sh_cmt_start(ch, FLAG_CLOCKEVENT); 711 713 712 - /* TODO: calculate good shift from rate and counter bit width */ 713 - 714 - ced->shift = 32; 715 - ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift); 716 - ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced); 717 - ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); 718 - 719 714 if (periodic) 720 - sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1); 715 + sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1); 721 716 else 722 717 sh_cmt_set_next(ch, ch->max_match_value); 723 718 } ··· 810 823 ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot; 811 824 ced->suspend = sh_cmt_clock_event_suspend; 812 825 ced->resume = sh_cmt_clock_event_resume; 826 + 827 + /* TODO: calculate good shift from rate and counter bit width */ 828 + ced->shift = 32; 829 + ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift); 830 + ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced); 831 + ced->max_delta_ticks = ch->max_match_value; 832 + ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); 833 + ced->min_delta_ticks = 0x1f; 813 834 814 835 dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n", 815 836 ch->index); ··· 990 995 ret = clk_prepare(cmt->clk); 991 996 if (ret < 0) 992 997 goto err_clk_put; 998 + 999 + /* Determine clock rate. */ 1000 + ret = clk_enable(cmt->clk); 1001 + if (ret < 0) 1002 + goto err_clk_unprepare; 1003 + 1004 + if (cmt->info->width == 16) 1005 + cmt->rate = clk_get_rate(cmt->clk) / 512; 1006 + else 1007 + cmt->rate = clk_get_rate(cmt->clk) / 8; 1008 + 1009 + clk_disable(cmt->clk); 993 1010 994 1011 /* Map the memory resource(s). */ 995 1012 ret = sh_cmt_map_memory(cmt);
+13 -13
drivers/clocksource/sh_tmu.c
··· 46 46 void __iomem *base; 47 47 int irq; 48 48 49 - unsigned long rate; 50 49 unsigned long periodic; 51 50 struct clock_event_device ced; 52 51 struct clocksource cs; ··· 58 59 59 60 void __iomem *mapbase; 60 61 struct clk *clk; 62 + unsigned long rate; 61 63 62 64 enum sh_tmu_model model; 63 65 ··· 165 165 sh_tmu_write(ch, TCNT, 0xffffffff); 166 166 167 167 /* configure channel to parent clock / 4, irq off */ 168 - ch->rate = clk_get_rate(ch->tmu->clk) / 4; 169 168 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); 170 169 171 170 /* enable channel */ ··· 270 271 return 0; 271 272 272 273 ret = sh_tmu_enable(ch); 273 - if (!ret) { 274 - __clocksource_update_freq_hz(cs, ch->rate); 274 + if (!ret) 275 275 ch->cs_enabled = true; 276 - } 277 276 278 277 return ret; 279 278 } ··· 331 334 dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", 332 335 ch->index); 333 336 334 - /* Register with dummy 1 Hz value, gets updated in ->enable() */ 335 - clocksource_register_hz(cs, 1); 337 + clocksource_register_hz(cs, ch->tmu->rate); 336 338 return 0; 337 339 } 338 340 ··· 342 346 343 347 static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic) 344 348 { 345 - struct clock_event_device *ced = &ch->ced; 346 - 347 349 sh_tmu_enable(ch); 348 350 349 - clockevents_config(ced, ch->rate); 350 - 351 351 if (periodic) { 352 - ch->periodic = (ch->rate + HZ/2) / HZ; 352 + ch->periodic = (ch->tmu->rate + HZ/2) / HZ; 353 353 sh_tmu_set_next(ch, ch->periodic, 1); 354 354 } 355 355 } ··· 427 435 dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n", 428 436 ch->index); 429 437 430 - clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); 438 + clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff); 431 439 432 440 ret = request_irq(ch->irq, sh_tmu_interrupt, 433 441 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, ··· 552 560 ret = clk_prepare(tmu->clk); 553 561 if (ret < 0) 554 562 goto err_clk_put; 563 + 564 + /* Determine clock rate. 
*/ 565 + ret = clk_enable(tmu->clk); 566 + if (ret < 0) 567 + goto err_clk_unprepare; 568 + 569 + tmu->rate = clk_get_rate(tmu->clk) / 4; 570 + clk_disable(tmu->clk); 555 571 556 572 /* Map the memory resource. */ 557 573 ret = sh_tmu_map_memory(tmu);
+5 -5
drivers/clocksource/sun4i_timer.c
··· 159 159 160 160 timer_base = of_iomap(node, 0); 161 161 if (!timer_base) { 162 - pr_crit("Can't map registers"); 162 + pr_crit("Can't map registers\n"); 163 163 return -ENXIO; 164 164 } 165 165 166 166 irq = irq_of_parse_and_map(node, 0); 167 167 if (irq <= 0) { 168 - pr_crit("Can't parse IRQ"); 168 + pr_crit("Can't parse IRQ\n"); 169 169 return -EINVAL; 170 170 } 171 171 172 172 clk = of_clk_get(node, 0); 173 173 if (IS_ERR(clk)) { 174 - pr_crit("Can't get timer clock"); 174 + pr_crit("Can't get timer clock\n"); 175 175 return PTR_ERR(clk); 176 176 } 177 177 178 178 ret = clk_prepare_enable(clk); 179 179 if (ret) { 180 - pr_err("Failed to prepare clock"); 180 + pr_err("Failed to prepare clock\n"); 181 181 return ret; 182 182 } 183 183 ··· 200 200 ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, 201 201 rate, 350, 32, clocksource_mmio_readl_down); 202 202 if (ret) { 203 - pr_err("Failed to register clocksource"); 203 + pr_err("Failed to register clocksource\n"); 204 204 return ret; 205 205 } 206 206
+1 -1
drivers/clocksource/tegra20_timer.c
··· 245 245 246 246 rtc_base = of_iomap(np, 0); 247 247 if (!rtc_base) { 248 - pr_err("Can't map RTC registers"); 248 + pr_err("Can't map RTC registers\n"); 249 249 return -ENXIO; 250 250 } 251 251
+8 -8
drivers/clocksource/time-armada-370-xp.c
··· 247 247 248 248 timer_base = of_iomap(np, 0); 249 249 if (!timer_base) { 250 - pr_err("Failed to iomap"); 250 + pr_err("Failed to iomap\n"); 251 251 return -ENXIO; 252 252 } 253 253 254 254 local_base = of_iomap(np, 1); 255 255 if (!local_base) { 256 - pr_err("Failed to iomap"); 256 + pr_err("Failed to iomap\n"); 257 257 return -ENXIO; 258 258 } 259 259 ··· 298 298 "armada_370_xp_clocksource", 299 299 timer_clk, 300, 32, clocksource_mmio_readl_down); 300 300 if (res) { 301 - pr_err("Failed to initialize clocksource mmio"); 301 + pr_err("Failed to initialize clocksource mmio\n"); 302 302 return res; 303 303 } 304 304 ··· 315 315 armada_370_xp_evt); 316 316 /* Immediately configure the timer on the boot CPU */ 317 317 if (res) { 318 - pr_err("Failed to request percpu irq"); 318 + pr_err("Failed to request percpu irq\n"); 319 319 return res; 320 320 } 321 321 ··· 324 324 armada_370_xp_timer_starting_cpu, 325 325 armada_370_xp_timer_dying_cpu); 326 326 if (res) { 327 - pr_err("Failed to setup hotplug state and timer"); 327 + pr_err("Failed to setup hotplug state and timer\n"); 328 328 return res; 329 329 } 330 330 ··· 339 339 int ret; 340 340 341 341 if (IS_ERR(clk)) { 342 - pr_err("Failed to get clock"); 342 + pr_err("Failed to get clock\n"); 343 343 return PTR_ERR(clk); 344 344 } 345 345 ··· 375 375 376 376 /* Must have at least a clock */ 377 377 if (IS_ERR(clk)) { 378 - pr_err("Failed to get clock"); 378 + pr_err("Failed to get clock\n"); 379 379 return PTR_ERR(clk); 380 380 } 381 381 ··· 399 399 400 400 clk = of_clk_get(np, 0); 401 401 if (IS_ERR(clk)) { 402 - pr_err("Failed to get clock"); 402 + pr_err("Failed to get clock\n"); 403 403 return PTR_ERR(clk); 404 404 } 405 405
+1 -1
drivers/clocksource/time-efm32.c
··· 235 235 236 236 ret = setup_irq(irq, &efm32_clock_event_irq); 237 237 if (ret) { 238 - pr_err("Failed setup irq"); 238 + pr_err("Failed setup irq\n"); 239 239 goto err_setup_irq; 240 240 } 241 241
+28 -6
drivers/clocksource/time-orion.c
··· 15 15 #include <linux/bitops.h> 16 16 #include <linux/clk.h> 17 17 #include <linux/clockchips.h> 18 + #include <linux/delay.h> 18 19 #include <linux/interrupt.h> 19 20 #include <linux/of_address.h> 20 21 #include <linux/of_irq.h> ··· 36 35 #define ORION_ONESHOT_MAX 0xfffffffe 37 36 38 37 static void __iomem *timer_base; 38 + 39 + static unsigned long notrace orion_read_timer(void) 40 + { 41 + return ~readl(timer_base + TIMER0_VAL); 42 + } 43 + 44 + static struct delay_timer orion_delay_timer = { 45 + .read_current_timer = orion_read_timer, 46 + }; 47 + 48 + static void orion_delay_timer_init(unsigned long rate) 49 + { 50 + orion_delay_timer.freq = rate; 51 + register_current_timer_delay(&orion_delay_timer); 52 + } 39 53 40 54 /* 41 55 * Free-running clocksource handling. ··· 122 106 123 107 static int __init orion_timer_init(struct device_node *np) 124 108 { 109 + unsigned long rate; 125 110 struct clk *clk; 126 111 int irq, ret; 127 112 ··· 141 124 142 125 ret = clk_prepare_enable(clk); 143 126 if (ret) { 144 - pr_err("Failed to prepare clock"); 127 + pr_err("Failed to prepare clock\n"); 145 128 return ret; 146 129 } 147 130 ··· 152 135 return -EINVAL; 153 136 } 154 137 138 + rate = clk_get_rate(clk); 139 + 155 140 /* setup timer0 as free-running clocksource */ 156 141 writel(~0, timer_base + TIMER0_VAL); 157 142 writel(~0, timer_base + TIMER0_RELOAD); ··· 161 142 TIMER0_RELOAD_EN | TIMER0_EN, 162 143 TIMER0_RELOAD_EN | TIMER0_EN); 163 144 164 - ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", 165 - clk_get_rate(clk), 300, 32, 145 + ret = clocksource_mmio_init(timer_base + TIMER0_VAL, 146 + "orion_clocksource", rate, 300, 32, 166 147 clocksource_mmio_readl_down); 167 148 if (ret) { 168 - pr_err("Failed to initialize mmio timer"); 149 + pr_err("Failed to initialize mmio timer\n"); 169 150 return ret; 170 151 } 171 152 172 - sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); 153 + 
sched_clock_register(orion_read_sched_clock, 32, rate); 173 154 174 155 /* setup timer1 as clockevent timer */ 175 156 ret = setup_irq(irq, &orion_clkevt_irq); ··· 181 162 ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; 182 163 orion_clkevt.cpumask = cpumask_of(0); 183 164 orion_clkevt.irq = irq; 184 - clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), 165 + clockevents_config_and_register(&orion_clkevt, rate, 185 166 ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); 167 + 168 + 169 + orion_delay_timer_init(rate); 186 170 187 171 return 0; 188 172 }
+2
drivers/clocksource/timer-atlas7.c
··· 192 192 ce->set_next_event = sirfsoc_timer_set_next_event; 193 193 clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60); 194 194 ce->max_delta_ns = clockevent_delta2ns(-2, ce); 195 + ce->max_delta_ticks = (unsigned long)-2; 195 196 ce->min_delta_ns = clockevent_delta2ns(2, ce); 197 + ce->min_delta_ticks = 2; 196 198 ce->cpumask = cpumask_of(cpu); 197 199 198 200 action->dev_id = ce;
+1 -1
drivers/clocksource/timer-atmel-pit.c
··· 226 226 227 227 ret = clocksource_register_hz(&data->clksrc, pit_rate); 228 228 if (ret) { 229 - pr_err("Failed to register clocksource"); 229 + pr_err("Failed to register clocksource\n"); 230 230 return ret; 231 231 } 232 232
+3 -3
drivers/clocksource/timer-digicolor.c
··· 161 161 */ 162 162 dc_timer_dev.base = of_iomap(node, 0); 163 163 if (!dc_timer_dev.base) { 164 - pr_err("Can't map registers"); 164 + pr_err("Can't map registers\n"); 165 165 return -ENXIO; 166 166 } 167 167 168 168 irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id); 169 169 if (irq <= 0) { 170 - pr_err("Can't parse IRQ"); 170 + pr_err("Can't parse IRQ\n"); 171 171 return -EINVAL; 172 172 } 173 173 174 174 clk = of_clk_get(node, 0); 175 175 if (IS_ERR(clk)) { 176 - pr_err("Can't get timer clock"); 176 + pr_err("Can't get timer clock\n"); 177 177 return PTR_ERR(clk); 178 178 } 179 179 clk_prepare_enable(clk);
+95 -69
drivers/clocksource/timer-gemini.c drivers/clocksource/timer-fttmr010.c
··· 1 1 /* 2 - * Gemini timer driver 2 + * Faraday Technology FTTMR010 timer driver 3 3 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> 4 4 * 5 5 * Based on a rewrite of arch/arm/mach-gemini/timer.c: ··· 16 16 #include <linux/clockchips.h> 17 17 #include <linux/clocksource.h> 18 18 #include <linux/sched_clock.h> 19 - 20 - /* 21 - * Relevant registers in the global syscon 22 - */ 23 - #define GLOBAL_STATUS 0x04 24 - #define CPU_AHB_RATIO_MASK (0x3 << 18) 25 - #define CPU_AHB_1_1 (0x0 << 18) 26 - #define CPU_AHB_3_2 (0x1 << 18) 27 - #define CPU_AHB_24_13 (0x2 << 18) 28 - #define CPU_AHB_2_1 (0x3 << 18) 29 - #define REG_TO_AHB_SPEED(reg) ((((reg) >> 15) & 0x7) * 10 + 130) 19 + #include <linux/clk.h> 30 20 31 21 /* 32 22 * Register definitions for the timers ··· 67 77 static unsigned int tick_rate; 68 78 static void __iomem *base; 69 79 70 - static u64 notrace gemini_read_sched_clock(void) 80 + static u64 notrace fttmr010_read_sched_clock(void) 71 81 { 72 82 return readl(base + TIMER3_COUNT); 73 83 } 74 84 75 - static int gemini_timer_set_next_event(unsigned long cycles, 85 + static int fttmr010_timer_set_next_event(unsigned long cycles, 76 86 struct clock_event_device *evt) 77 87 { 78 88 u32 cr; ··· 86 96 return 0; 87 97 } 88 98 89 - static int gemini_timer_shutdown(struct clock_event_device *evt) 99 + static int fttmr010_timer_shutdown(struct clock_event_device *evt) 90 100 { 91 101 u32 cr; 92 102 ··· 117 127 return 0; 118 128 } 119 129 120 - static int gemini_timer_set_periodic(struct clock_event_device *evt) 130 + static int fttmr010_timer_set_periodic(struct clock_event_device *evt) 121 131 { 122 132 u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ); 123 133 u32 cr; ··· 148 158 } 149 159 150 160 /* Use TIMER1 as clock event */ 151 - static struct clock_event_device gemini_clockevent = { 161 + static struct clock_event_device fttmr010_clockevent = { 152 162 .name = "TIMER1", 153 163 /* Reasonably fast and accurate clock event */ 154 164 .rating = 300, 155 
165 .shift = 32, 156 166 .features = CLOCK_EVT_FEAT_PERIODIC | 157 167 CLOCK_EVT_FEAT_ONESHOT, 158 - .set_next_event = gemini_timer_set_next_event, 159 - .set_state_shutdown = gemini_timer_shutdown, 160 - .set_state_periodic = gemini_timer_set_periodic, 161 - .set_state_oneshot = gemini_timer_shutdown, 162 - .tick_resume = gemini_timer_shutdown, 168 + .set_next_event = fttmr010_timer_set_next_event, 169 + .set_state_shutdown = fttmr010_timer_shutdown, 170 + .set_state_periodic = fttmr010_timer_set_periodic, 171 + .set_state_oneshot = fttmr010_timer_shutdown, 172 + .tick_resume = fttmr010_timer_shutdown, 163 173 }; 164 174 165 175 /* 166 176 * IRQ handler for the timer 167 177 */ 168 - static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id) 178 + static irqreturn_t fttmr010_timer_interrupt(int irq, void *dev_id) 169 179 { 170 - struct clock_event_device *evt = &gemini_clockevent; 180 + struct clock_event_device *evt = &fttmr010_clockevent; 171 181 172 182 evt->event_handler(evt); 173 183 return IRQ_HANDLED; 174 184 } 175 185 176 - static struct irqaction gemini_timer_irq = { 177 - .name = "Gemini Timer Tick", 186 + static struct irqaction fttmr010_timer_irq = { 187 + .name = "Faraday FTTMR010 Timer Tick", 178 188 .flags = IRQF_TIMER, 179 - .handler = gemini_timer_interrupt, 189 + .handler = fttmr010_timer_interrupt, 180 190 }; 181 191 182 - static int __init gemini_timer_of_init(struct device_node *np) 192 + static int __init fttmr010_timer_common_init(struct device_node *np) 183 193 { 184 - static struct regmap *map; 185 194 int irq; 186 - int ret; 187 - u32 val; 188 - 189 - map = syscon_regmap_lookup_by_phandle(np, "syscon"); 190 - if (IS_ERR(map)) { 191 - pr_err("Can't get regmap for syscon handle"); 192 - return -ENODEV; 193 - } 194 - ret = regmap_read(map, GLOBAL_STATUS, &val); 195 - if (ret) { 196 - pr_err("Can't read syscon status register"); 197 - return -ENXIO; 198 - } 199 195 200 196 base = of_iomap(np, 0); 201 197 if (!base) { ··· 193 217 if (irq 
<= 0) { 194 218 pr_err("Can't parse IRQ"); 195 219 return -EINVAL; 196 - } 197 - 198 - tick_rate = REG_TO_AHB_SPEED(val) * 1000000; 199 - printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000); 200 - 201 - tick_rate /= 6; /* APB bus run AHB*(1/6) */ 202 - 203 - switch (val & CPU_AHB_RATIO_MASK) { 204 - case CPU_AHB_1_1: 205 - printk(KERN_CONT "(1/1)\n"); 206 - break; 207 - case CPU_AHB_3_2: 208 - printk(KERN_CONT "(3/2)\n"); 209 - break; 210 - case CPU_AHB_24_13: 211 - printk(KERN_CONT "(24/13)\n"); 212 - break; 213 - case CPU_AHB_2_1: 214 - printk(KERN_CONT "(2/1)\n"); 215 - break; 216 220 } 217 221 218 222 /* ··· 211 255 writel(0, base + TIMER3_MATCH1); 212 256 writel(0, base + TIMER3_MATCH2); 213 257 clocksource_mmio_init(base + TIMER3_COUNT, 214 - "gemini_clocksource", tick_rate, 258 + "fttmr010_clocksource", tick_rate, 215 259 300, 32, clocksource_mmio_readl_up); 216 - sched_clock_register(gemini_read_sched_clock, 32, tick_rate); 260 + sched_clock_register(fttmr010_read_sched_clock, 32, tick_rate); 217 261 218 262 /* 219 263 * Setup clockevent timer (interrupt-driven.) ··· 222 266 writel(0, base + TIMER1_LOAD); 223 267 writel(0, base + TIMER1_MATCH1); 224 268 writel(0, base + TIMER1_MATCH2); 225 - setup_irq(irq, &gemini_timer_irq); 226 - gemini_clockevent.cpumask = cpumask_of(0); 227 - clockevents_config_and_register(&gemini_clockevent, tick_rate, 269 + setup_irq(irq, &fttmr010_timer_irq); 270 + fttmr010_clockevent.cpumask = cpumask_of(0); 271 + clockevents_config_and_register(&fttmr010_clockevent, tick_rate, 228 272 1, 0xffffffff); 229 273 230 274 return 0; 231 275 } 232 - CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "cortina,gemini-timer", 233 - gemini_timer_of_init); 276 + 277 + static int __init fttmr010_timer_of_init(struct device_node *np) 278 + { 279 + /* 280 + * These implementations require a clock reference. 281 + * FIXME: we currently only support clocking using PCLK 282 + * and using EXTCLK is not supported in the driver. 
283 + */ 284 + struct clk *clk; 285 + 286 + clk = of_clk_get_by_name(np, "PCLK"); 287 + if (IS_ERR(clk)) { 288 + pr_err("could not get PCLK"); 289 + return PTR_ERR(clk); 290 + } 291 + tick_rate = clk_get_rate(clk); 292 + 293 + return fttmr010_timer_common_init(np); 294 + } 295 + CLOCKSOURCE_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_of_init); 296 + 297 + /* 298 + * Gemini-specific: relevant registers in the global syscon 299 + */ 300 + #define GLOBAL_STATUS 0x04 301 + #define CPU_AHB_RATIO_MASK (0x3 << 18) 302 + #define CPU_AHB_1_1 (0x0 << 18) 303 + #define CPU_AHB_3_2 (0x1 << 18) 304 + #define CPU_AHB_24_13 (0x2 << 18) 305 + #define CPU_AHB_2_1 (0x3 << 18) 306 + #define REG_TO_AHB_SPEED(reg) ((((reg) >> 15) & 0x7) * 10 + 130) 307 + 308 + static int __init gemini_timer_of_init(struct device_node *np) 309 + { 310 + static struct regmap *map; 311 + int ret; 312 + u32 val; 313 + 314 + map = syscon_regmap_lookup_by_phandle(np, "syscon"); 315 + if (IS_ERR(map)) { 316 + pr_err("Can't get regmap for syscon handle\n"); 317 + return -ENODEV; 318 + } 319 + ret = regmap_read(map, GLOBAL_STATUS, &val); 320 + if (ret) { 321 + pr_err("Can't read syscon status register\n"); 322 + return -ENXIO; 323 + } 324 + 325 + tick_rate = REG_TO_AHB_SPEED(val) * 1000000; 326 + pr_info("Bus: %dMHz ", tick_rate / 1000000); 327 + 328 + tick_rate /= 6; /* APB bus run AHB*(1/6) */ 329 + 330 + switch (val & CPU_AHB_RATIO_MASK) { 331 + case CPU_AHB_1_1: 332 + pr_cont("(1/1)\n"); 333 + break; 334 + case CPU_AHB_3_2: 335 + pr_cont("(3/2)\n"); 336 + break; 337 + case CPU_AHB_24_13: 338 + pr_cont("(24/13)\n"); 339 + break; 340 + case CPU_AHB_2_1: 341 + pr_cont("(2/1)\n"); 342 + break; 343 + } 344 + 345 + return fttmr010_timer_common_init(np); 346 + } 347 + CLOCKSOURCE_OF_DECLARE(gemini, "cortina,gemini-timer", gemini_timer_of_init);
+2 -2
drivers/clocksource/timer-integrator-ap.c
··· 200 200 err = of_property_read_string(of_aliases, 201 201 "arm,timer-primary", &path); 202 202 if (err) { 203 - pr_warn("Failed to read property"); 203 + pr_warn("Failed to read property\n"); 204 204 return err; 205 205 } 206 206 ··· 209 209 err = of_property_read_string(of_aliases, 210 210 "arm,timer-secondary", &path); 211 211 if (err) { 212 - pr_warn("Failed to read property"); 212 + pr_warn("Failed to read property\n"); 213 213 return err; 214 214 } 215 215
+3 -3
drivers/clocksource/timer-nps.c
··· 55 55 *clk = of_clk_get(node, 0); 56 56 ret = PTR_ERR_OR_ZERO(*clk); 57 57 if (ret) { 58 - pr_err("timer missing clk"); 58 + pr_err("timer missing clk\n"); 59 59 return ret; 60 60 } 61 61 ··· 247 247 248 248 nps_timer0_irq = irq_of_parse_and_map(node, 0); 249 249 if (nps_timer0_irq <= 0) { 250 - pr_err("clockevent: missing irq"); 250 + pr_err("clockevent: missing irq\n"); 251 251 return -EINVAL; 252 252 } 253 253 ··· 270 270 nps_timer_starting_cpu, 271 271 nps_timer_dying_cpu); 272 272 if (ret) { 273 - pr_err("Failed to setup hotplug state"); 273 + pr_err("Failed to setup hotplug state\n"); 274 274 clk_disable_unprepare(clk); 275 275 free_percpu_irq(nps_timer0_irq, &nps_clockevent_device); 276 276 return ret;
+5 -5
drivers/clocksource/timer-prima2.c
··· 196 196 197 197 clk = of_clk_get(np, 0); 198 198 if (IS_ERR(clk)) { 199 - pr_err("Failed to get clock"); 199 + pr_err("Failed to get clock\n"); 200 200 return PTR_ERR(clk); 201 201 } 202 202 203 203 ret = clk_prepare_enable(clk); 204 204 if (ret) { 205 - pr_err("Failed to enable clock"); 205 + pr_err("Failed to enable clock\n"); 206 206 return ret; 207 207 } 208 208 209 209 rate = clk_get_rate(clk); 210 210 211 211 if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) { 212 - pr_err("Invalid clock rate"); 212 + pr_err("Invalid clock rate\n"); 213 213 return -EINVAL; 214 214 } 215 215 ··· 229 229 230 230 ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ); 231 231 if (ret) { 232 - pr_err("Failed to register clocksource"); 232 + pr_err("Failed to register clocksource\n"); 233 233 return ret; 234 234 } 235 235 ··· 237 237 238 238 ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); 239 239 if (ret) { 240 - pr_err("Failed to setup irq"); 240 + pr_err("Failed to setup irq\n"); 241 241 return ret; 242 242 } 243 243
+2 -2
drivers/clocksource/timer-sp804.c
··· 299 299 300 300 base = of_iomap(np, 0); 301 301 if (!base) { 302 - pr_err("Failed to iomap"); 302 + pr_err("Failed to iomap\n"); 303 303 return -ENXIO; 304 304 } 305 305 306 306 clk = of_clk_get(np, 0); 307 307 if (IS_ERR(clk)) { 308 - pr_err("Failed to get clock"); 308 + pr_err("Failed to get clock\n"); 309 309 return PTR_ERR(clk); 310 310 } 311 311
+3 -3
drivers/clocksource/timer-sun5i.c
··· 332 332 333 333 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); 334 334 if (IS_ERR(timer_base)) { 335 - pr_err("Can't map registers"); 335 + pr_err("Can't map registers\n"); 336 336 return PTR_ERR(timer_base);; 337 337 } 338 338 339 339 irq = irq_of_parse_and_map(node, 0); 340 340 if (irq <= 0) { 341 - pr_err("Can't parse IRQ"); 341 + pr_err("Can't parse IRQ\n"); 342 342 return -EINVAL; 343 343 } 344 344 345 345 clk = of_clk_get(node, 0); 346 346 if (IS_ERR(clk)) { 347 - pr_err("Can't get timer clock"); 347 + pr_err("Can't get timer clock\n"); 348 348 return PTR_ERR(clk); 349 349 } 350 350
+1 -1
drivers/clocksource/vf_pit_timer.c
··· 165 165 166 166 timer_base = of_iomap(np, 0); 167 167 if (!timer_base) { 168 - pr_err("Failed to iomap"); 168 + pr_err("Failed to iomap\n"); 169 169 return -ENXIO; 170 170 } 171 171
+7 -11
drivers/ptp/ptp_clock.c
··· 97 97 98 98 /* posix clock implementation */ 99 99 100 - static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) 100 + static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp) 101 101 { 102 102 tp->tv_sec = 0; 103 103 tp->tv_nsec = 1; 104 104 return 0; 105 105 } 106 106 107 - static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) 107 + static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp) 108 108 { 109 109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 110 - struct timespec64 ts = timespec_to_timespec64(*tp); 111 110 112 - return ptp->info->settime64(ptp->info, &ts); 111 + return ptp->info->settime64(ptp->info, tp); 113 112 } 114 113 115 - static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp) 114 + static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp) 116 115 { 117 116 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 118 - struct timespec64 ts; 119 117 int err; 120 118 121 - err = ptp->info->gettime64(ptp->info, &ts); 122 - if (!err) 123 - *tp = timespec64_to_timespec(ts); 119 + err = ptp->info->gettime64(ptp->info, tp); 124 120 return err; 125 121 } 126 122 ··· 129 133 ops = ptp->info; 130 134 131 135 if (tx->modes & ADJ_SETOFFSET) { 132 - struct timespec ts; 136 + struct timespec64 ts; 133 137 ktime_t kt; 134 138 s64 delta; 135 139 ··· 142 146 if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) 143 147 return -EINVAL; 144 148 145 - kt = timespec_to_ktime(ts); 149 + kt = timespec64_to_ktime(ts); 146 150 delta = ktime_to_ns(kt); 147 151 err = ops->adjtime(ops, delta); 148 152 } else if (tx->modes & ADJ_FREQUENCY) {
+34
include/clocksource/arm_arch_timer.h
··· 16 16 #ifndef __CLKSOURCE_ARM_ARCH_TIMER_H 17 17 #define __CLKSOURCE_ARM_ARCH_TIMER_H 18 18 19 + #include <linux/bitops.h> 19 20 #include <linux/timecounter.h> 20 21 #include <linux/types.h> 22 + 23 + #define ARCH_TIMER_TYPE_CP15 BIT(0) 24 + #define ARCH_TIMER_TYPE_MEM BIT(1) 21 25 22 26 #define ARCH_TIMER_CTRL_ENABLE (1 << 0) 23 27 #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) ··· 38 34 ARCH_TIMER_REG_TVAL, 39 35 }; 40 36 37 + enum arch_timer_ppi_nr { 38 + ARCH_TIMER_PHYS_SECURE_PPI, 39 + ARCH_TIMER_PHYS_NONSECURE_PPI, 40 + ARCH_TIMER_VIRT_PPI, 41 + ARCH_TIMER_HYP_PPI, 42 + ARCH_TIMER_MAX_TIMER_PPI 43 + }; 44 + 45 + enum arch_timer_spi_nr { 46 + ARCH_TIMER_PHYS_SPI, 47 + ARCH_TIMER_VIRT_SPI, 48 + ARCH_TIMER_MAX_TIMER_SPI 49 + }; 50 + 41 51 #define ARCH_TIMER_PHYS_ACCESS 0 42 52 #define ARCH_TIMER_VIRT_ACCESS 1 43 53 #define ARCH_TIMER_MEM_PHYS_ACCESS 2 44 54 #define ARCH_TIMER_MEM_VIRT_ACCESS 3 55 + 56 + #define ARCH_TIMER_MEM_MAX_FRAMES 8 45 57 46 58 #define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */ 47 59 #define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */ ··· 72 52 struct arch_timer_kvm_info { 73 53 struct timecounter timecounter; 74 54 int virtual_irq; 55 + }; 56 + 57 + struct arch_timer_mem_frame { 58 + bool valid; 59 + phys_addr_t cntbase; 60 + size_t size; 61 + int phys_irq; 62 + int virt_irq; 63 + }; 64 + 65 + struct arch_timer_mem { 66 + phys_addr_t cntctlbase; 67 + size_t size; 68 + struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES]; 75 69 }; 76 70 77 71 #ifdef CONFIG_ARM_ARCH_TIMER
+7
include/linux/acpi.h
··· 591 591 int acpi_reconfig_notifier_register(struct notifier_block *nb); 592 592 int acpi_reconfig_notifier_unregister(struct notifier_block *nb); 593 593 594 + #ifdef CONFIG_ACPI_GTDT 595 + int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); 596 + int acpi_gtdt_map_ppi(int type); 597 + bool acpi_gtdt_c3stop(int type); 598 + int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); 599 + #endif 600 + 594 601 #else /* !CONFIG_ACPI */ 595 602 596 603 #define acpi_disabled 1
-1
include/linux/clockchips.h
··· 182 182 extern void clockevents_register_device(struct clock_event_device *dev); 183 183 extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); 184 184 185 - extern void clockevents_config(struct clock_event_device *dev, u32 freq); 186 185 extern void clockevents_config_and_register(struct clock_event_device *dev, 187 186 u32 freq, unsigned long min_delta, 188 187 unsigned long max_delta);
+1 -1
include/linux/clocksource.h
··· 120 120 #define CLOCK_SOURCE_RESELECT 0x100 121 121 122 122 /* simplify initialization of mask field */ 123 - #define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 123 + #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0) 124 124 125 125 static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) 126 126 {
+1 -5
include/linux/hrtimer.h
··· 276 276 return timer->base->cpu_base->hres_active; 277 277 } 278 278 279 - extern void hrtimer_peek_ahead_timers(void); 280 - 281 279 /* 282 280 * The resolution of the clocks. The resolution value is returned in 283 281 * the clock_getres() system call to give application programmers an ··· 297 299 # define KTIME_MONOTONIC_RES KTIME_LOW_RES 298 300 299 301 #define hrtimer_resolution (unsigned int)LOW_RES_NSEC 300 - 301 - static inline void hrtimer_peek_ahead_timers(void) { } 302 302 303 303 static inline int hrtimer_is_hres_active(struct hrtimer *timer) 304 304 { ··· 452 456 } 453 457 454 458 /* Precise sleep: */ 455 - extern long hrtimer_nanosleep(struct timespec *rqtp, 459 + extern long hrtimer_nanosleep(struct timespec64 *rqtp, 456 460 struct timespec __user *rmtp, 457 461 const enum hrtimer_mode mode, 458 462 const clockid_t clockid);
-1
include/linux/irqchip/mips-gic.h
··· 258 258 extern void gic_init(unsigned long gic_base_addr, 259 259 unsigned long gic_addrspace_size, unsigned int cpu_vec, 260 260 unsigned int irqbase); 261 - extern void gic_clocksource_init(unsigned int); 262 261 extern u64 gic_read_count(void); 263 262 extern unsigned int gic_get_count_width(void); 264 263 extern u64 gic_read_compare(void);
+5 -5
include/linux/posix-clock.h
··· 59 59 60 60 int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); 61 61 62 - int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); 62 + int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); 63 63 64 - int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); 64 + int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts); 65 65 66 66 int (*clock_settime)(struct posix_clock *pc, 67 - const struct timespec *ts); 67 + const struct timespec64 *ts); 68 68 69 69 int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); 70 70 71 71 int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); 72 72 73 73 void (*timer_gettime)(struct posix_clock *pc, 74 - struct k_itimer *kit, struct itimerspec *tsp); 74 + struct k_itimer *kit, struct itimerspec64 *tsp); 75 75 76 76 int (*timer_settime)(struct posix_clock *pc, 77 77 struct k_itimer *kit, int flags, 78 - struct itimerspec *tsp, struct itimerspec *old); 78 + struct itimerspec64 *tsp, struct itimerspec64 *old); 79 79 /* 80 80 * Optional character device methods: 81 81 */
+10 -10
include/linux/posix-timers.h
··· 87 87 }; 88 88 89 89 struct k_clock { 90 - int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); 90 + int (*clock_getres) (const clockid_t which_clock, struct timespec64 *tp); 91 91 int (*clock_set) (const clockid_t which_clock, 92 - const struct timespec *tp); 93 - int (*clock_get) (const clockid_t which_clock, struct timespec * tp); 92 + const struct timespec64 *tp); 93 + int (*clock_get) (const clockid_t which_clock, struct timespec64 *tp); 94 94 int (*clock_adj) (const clockid_t which_clock, struct timex *tx); 95 95 int (*timer_create) (struct k_itimer *timer); 96 96 int (*nsleep) (const clockid_t which_clock, int flags, 97 - struct timespec *, struct timespec __user *); 97 + struct timespec64 *, struct timespec __user *); 98 98 long (*nsleep_restart) (struct restart_block *restart_block); 99 - int (*timer_set) (struct k_itimer * timr, int flags, 100 - struct itimerspec * new_setting, 101 - struct itimerspec * old_setting); 102 - int (*timer_del) (struct k_itimer * timr); 99 + int (*timer_set) (struct k_itimer *timr, int flags, 100 + struct itimerspec64 *new_setting, 101 + struct itimerspec64 *old_setting); 102 + int (*timer_del) (struct k_itimer *timr); 103 103 #define TIMER_RETRY 1 104 - void (*timer_get) (struct k_itimer * timr, 105 - struct itimerspec * cur_setting); 104 + void (*timer_get) (struct k_itimer *timr, 105 + struct itimerspec64 *cur_setting); 106 106 }; 107 107 108 108 extern struct k_clock clock_posix_cpu;
+5 -15
include/linux/timekeeping.h
··· 19 19 extern int do_settimeofday64(const struct timespec64 *ts); 20 20 extern int do_sys_settimeofday64(const struct timespec64 *tv, 21 21 const struct timezone *tz); 22 - static inline int do_sys_settimeofday(const struct timespec *tv, 23 - const struct timezone *tz) 24 - { 25 - struct timespec64 ts64; 26 - 27 - if (!tv) 28 - return do_sys_settimeofday64(NULL, tz); 29 - 30 - if (!timespec_valid(tv)) 31 - return -EINVAL; 32 - 33 - ts64 = timespec_to_timespec64(*tv); 34 - return do_sys_settimeofday64(&ts64, tz); 35 - } 36 - 37 22 /* 38 23 * Kernel time accessors 39 24 */ ··· 256 271 static inline void timekeeping_clocktai(struct timespec *ts) 257 272 { 258 273 *ts = ktime_to_timespec(ktime_get_clocktai()); 274 + } 275 + 276 + static inline void timekeeping_clocktai64(struct timespec64 *ts) 277 + { 278 + *ts = ktime_to_timespec64(ktime_get_clocktai()); 259 279 } 260 280 261 281 /*
+6 -4
kernel/compat.c
··· 108 108 COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv, 109 109 struct timezone __user *, tz) 110 110 { 111 + struct timespec64 new_ts; 111 112 struct timeval user_tv; 112 - struct timespec new_ts; 113 113 struct timezone new_tz; 114 114 115 115 if (tv) { ··· 123 123 return -EFAULT; 124 124 } 125 125 126 - return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); 126 + return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL); 127 127 } 128 128 129 129 static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) ··· 240 240 struct compat_timespec __user *, rmtp) 241 241 { 242 242 struct timespec tu, rmt; 243 + struct timespec64 tu64; 243 244 mm_segment_t oldfs; 244 245 long ret; 245 246 246 247 if (compat_get_timespec(&tu, rqtp)) 247 248 return -EFAULT; 248 249 249 - if (!timespec_valid(&tu)) 250 + tu64 = timespec_to_timespec64(tu); 251 + if (!timespec64_valid(&tu64)) 250 252 return -EINVAL; 251 253 252 254 oldfs = get_fs(); 253 255 set_fs(KERNEL_DS); 254 - ret = hrtimer_nanosleep(&tu, 256 + ret = hrtimer_nanosleep(&tu64, 255 257 rmtp ? (struct timespec __user *)&rmt : NULL, 256 258 HRTIMER_MODE_REL, CLOCK_MONOTONIC); 257 259 set_fs(oldfs);
+2
kernel/sysctl.c
··· 1176 1176 .maxlen = sizeof(unsigned int), 1177 1177 .mode = 0644, 1178 1178 .proc_handler = timer_migration_handler, 1179 + .extra1 = &zero, 1180 + .extra2 = &one, 1179 1181 }, 1180 1182 #endif 1181 1183 #ifdef CONFIG_BPF_SYSCALL
+14 -13
kernel/time/alarmtimer.c
··· 541 541 * 542 542 * Returns the granularity of underlying alarm base clock 543 543 */ 544 - static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp) 544 + static int alarm_clock_getres(const clockid_t which_clock, struct timespec64 *tp) 545 545 { 546 546 if (!alarmtimer_get_rtcdev()) 547 547 return -EINVAL; ··· 558 558 * 559 559 * Provides the underlying alarm base time. 560 560 */ 561 - static int alarm_clock_get(clockid_t which_clock, struct timespec *tp) 561 + static int alarm_clock_get(clockid_t which_clock, struct timespec64 *tp) 562 562 { 563 563 struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; 564 564 565 565 if (!alarmtimer_get_rtcdev()) 566 566 return -EINVAL; 567 567 568 - *tp = ktime_to_timespec(base->gettime()); 568 + *tp = ktime_to_timespec64(base->gettime()); 569 569 return 0; 570 570 } 571 571 ··· 598 598 * Copies out the current itimerspec data 599 599 */ 600 600 static void alarm_timer_get(struct k_itimer *timr, 601 - struct itimerspec *cur_setting) 601 + struct itimerspec64 *cur_setting) 602 602 { 603 603 ktime_t relative_expiry_time = 604 604 alarm_expires_remaining(&(timr->it.alarm.alarmtimer)); 605 605 606 606 if (ktime_to_ns(relative_expiry_time) > 0) { 607 - cur_setting->it_value = ktime_to_timespec(relative_expiry_time); 607 + cur_setting->it_value = ktime_to_timespec64(relative_expiry_time); 608 608 } else { 609 609 cur_setting->it_value.tv_sec = 0; 610 610 cur_setting->it_value.tv_nsec = 0; 611 611 } 612 612 613 - cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval); 613 + cur_setting->it_interval = ktime_to_timespec64(timr->it.alarm.interval); 614 614 } 615 615 616 616 /** ··· 640 640 * Sets the timer to new_setting, and starts the timer. 
641 641 */ 642 642 static int alarm_timer_set(struct k_itimer *timr, int flags, 643 - struct itimerspec *new_setting, 644 - struct itimerspec *old_setting) 643 + struct itimerspec64 *new_setting, 644 + struct itimerspec64 *old_setting) 645 645 { 646 646 ktime_t exp; 647 647 ··· 659 659 return TIMER_RETRY; 660 660 661 661 /* start the timer */ 662 - timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); 663 - exp = timespec_to_ktime(new_setting->it_value); 662 + timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); 663 + exp = timespec64_to_ktime(new_setting->it_value); 664 664 /* Convert (if necessary) to absolute time */ 665 665 if (flags != TIMER_ABSTIME) { 666 666 ktime_t now; ··· 790 790 * Handles clock_nanosleep calls against _ALARM clockids 791 791 */ 792 792 static int alarm_timer_nsleep(const clockid_t which_clock, int flags, 793 - struct timespec *tsreq, struct timespec __user *rmtp) 793 + struct timespec64 *tsreq, 794 + struct timespec __user *rmtp) 794 795 { 795 796 enum alarmtimer_type type = clock2alarm(which_clock); 797 + struct restart_block *restart; 796 798 struct alarm alarm; 797 799 ktime_t exp; 798 800 int ret = 0; 799 - struct restart_block *restart; 800 801 801 802 if (!alarmtimer_get_rtcdev()) 802 803 return -ENOTSUPP; ··· 810 809 811 810 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); 812 811 813 - exp = timespec_to_ktime(*tsreq); 812 + exp = timespec64_to_ktime(*tsreq); 814 813 /* Convert (if necessary) to absolute time */ 815 814 if (flags != TIMER_ABSTIME) { 816 815 ktime_t now = alarm_bases[type].gettime();
+1 -1
kernel/time/clockevents.c
··· 468 468 } 469 469 EXPORT_SYMBOL_GPL(clockevents_register_device); 470 470 471 - void clockevents_config(struct clock_event_device *dev, u32 freq) 471 + static void clockevents_config(struct clock_event_device *dev, u32 freq) 472 472 { 473 473 u64 sec; 474 474
+7 -8
kernel/time/hrtimer.c
··· 1368 1368 ktime_to_ns(delta)); 1369 1369 } 1370 1370 1371 - /* 1372 - * local version of hrtimer_peek_ahead_timers() called with interrupts 1373 - * disabled. 1374 - */ 1371 + /* called with interrupts disabled */ 1375 1372 static inline void __hrtimer_peek_ahead_timers(void) 1376 1373 { 1377 1374 struct tick_device *td; ··· 1503 1506 return ret; 1504 1507 } 1505 1508 1506 - long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, 1509 + long hrtimer_nanosleep(struct timespec64 *rqtp, struct timespec __user *rmtp, 1507 1510 const enum hrtimer_mode mode, const clockid_t clockid) 1508 1511 { 1509 1512 struct restart_block *restart; ··· 1516 1519 slack = 0; 1517 1520 1518 1521 hrtimer_init_on_stack(&t.timer, clockid, mode); 1519 - hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); 1522 + hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); 1520 1523 if (do_nanosleep(&t, mode)) 1521 1524 goto out; 1522 1525 ··· 1547 1550 SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, 1548 1551 struct timespec __user *, rmtp) 1549 1552 { 1553 + struct timespec64 tu64; 1550 1554 struct timespec tu; 1551 1555 1552 1556 if (copy_from_user(&tu, rqtp, sizeof(tu))) 1553 1557 return -EFAULT; 1554 1558 1555 - if (!timespec_valid(&tu)) 1559 + tu64 = timespec_to_timespec64(tu); 1560 + if (!timespec64_valid(&tu64)) 1556 1561 return -EINVAL; 1557 1562 1558 - return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); 1563 + return hrtimer_nanosleep(&tu64, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); 1559 1564 } 1560 1565 1561 1566 /*
+5 -5
kernel/time/posix-clock.c
··· 297 297 return err; 298 298 } 299 299 300 - static int pc_clock_gettime(clockid_t id, struct timespec *ts) 300 + static int pc_clock_gettime(clockid_t id, struct timespec64 *ts) 301 301 { 302 302 struct posix_clock_desc cd; 303 303 int err; ··· 316 316 return err; 317 317 } 318 318 319 - static int pc_clock_getres(clockid_t id, struct timespec *ts) 319 + static int pc_clock_getres(clockid_t id, struct timespec64 *ts) 320 320 { 321 321 struct posix_clock_desc cd; 322 322 int err; ··· 335 335 return err; 336 336 } 337 337 338 - static int pc_clock_settime(clockid_t id, const struct timespec *ts) 338 + static int pc_clock_settime(clockid_t id, const struct timespec64 *ts) 339 339 { 340 340 struct posix_clock_desc cd; 341 341 int err; ··· 399 399 return err; 400 400 } 401 401 402 - static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts) 402 + static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec64 *ts) 403 403 { 404 404 clockid_t id = kit->it_clock; 405 405 struct posix_clock_desc cd; ··· 414 414 } 415 415 416 416 static int pc_timer_settime(struct k_itimer *kit, int flags, 417 - struct itimerspec *ts, struct itimerspec *old) 417 + struct itimerspec64 *ts, struct itimerspec64 *old) 418 418 { 419 419 clockid_t id = kit->it_clock; 420 420 struct posix_clock_desc cd;
+42 -33
kernel/time/posix-cpu-timers.c
··· 116 116 } 117 117 118 118 static int 119 - posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) 119 + posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp) 120 120 { 121 121 int error = check_clock(which_clock); 122 122 if (!error) { ··· 135 135 } 136 136 137 137 static int 138 - posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp) 138 + posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp) 139 139 { 140 140 /* 141 141 * You can never reset a CPU clock, but we check for other errors ··· 261 261 262 262 static int posix_cpu_clock_get_task(struct task_struct *tsk, 263 263 const clockid_t which_clock, 264 - struct timespec *tp) 264 + struct timespec64 *tp) 265 265 { 266 266 int err = -EINVAL; 267 267 u64 rtn; ··· 275 275 } 276 276 277 277 if (!err) 278 - *tp = ns_to_timespec(rtn); 278 + *tp = ns_to_timespec64(rtn); 279 279 280 280 return err; 281 281 } 282 282 283 283 284 - static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) 284 + static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp) 285 285 { 286 286 const pid_t pid = CPUCLOCK_PID(which_clock); 287 287 int err = -EINVAL; ··· 562 562 * and try again. (This happens when the timer is in the middle of firing.) 
563 563 */ 564 564 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, 565 - struct itimerspec *new, struct itimerspec *old) 565 + struct itimerspec64 *new, struct itimerspec64 *old) 566 566 { 567 567 unsigned long flags; 568 568 struct sighand_struct *sighand; ··· 572 572 573 573 WARN_ON_ONCE(p == NULL); 574 574 575 - new_expires = timespec_to_ns(&new->it_value); 575 + new_expires = timespec64_to_ns(&new->it_value); 576 576 577 577 /* 578 578 * Protect against sighand release/switch in exit/exec and p->cpu_timers ··· 633 633 bump_cpu_timer(timer, val); 634 634 if (val < timer->it.cpu.expires) { 635 635 old_expires = timer->it.cpu.expires - val; 636 - old->it_value = ns_to_timespec(old_expires); 636 + old->it_value = ns_to_timespec64(old_expires); 637 637 } else { 638 638 old->it_value.tv_nsec = 1; 639 639 old->it_value.tv_sec = 0; ··· 671 671 * Install the new reload setting, and 672 672 * set up the signal and overrun bookkeeping. 673 673 */ 674 - timer->it.cpu.incr = timespec_to_ns(&new->it_interval); 674 + timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); 675 675 676 676 /* 677 677 * This acts as a modification timestamp for the timer, ··· 695 695 ret = 0; 696 696 out: 697 697 if (old) 698 - old->it_interval = ns_to_timespec(old_incr); 698 + old->it_interval = ns_to_timespec64(old_incr); 699 699 700 700 return ret; 701 701 } 702 702 703 - static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) 703 + static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp) 704 704 { 705 705 u64 now; 706 706 struct task_struct *p = timer->it.cpu.task; ··· 710 710 /* 711 711 * Easy part: convert the reload time. 712 712 */ 713 - itp->it_interval = ns_to_timespec(timer->it.cpu.incr); 713 + itp->it_interval = ns_to_timespec64(timer->it.cpu.incr); 714 714 715 715 if (timer->it.cpu.expires == 0) { /* Timer not armed at all. 
*/ 716 716 itp->it_value.tv_sec = itp->it_value.tv_nsec = 0; ··· 739 739 * Call the timer disarmed, nothing else to do. 740 740 */ 741 741 timer->it.cpu.expires = 0; 742 - itp->it_value = ns_to_timespec(timer->it.cpu.expires); 742 + itp->it_value = ns_to_timespec64(timer->it.cpu.expires); 743 743 return; 744 744 } else { 745 745 cpu_timer_sample_group(timer->it_clock, p, &now); ··· 748 748 } 749 749 750 750 if (now < timer->it.cpu.expires) { 751 - itp->it_value = ns_to_timespec(timer->it.cpu.expires - now); 751 + itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now); 752 752 } else { 753 753 /* 754 754 * The timer should have expired already, but the firing ··· 825 825 * At the hard limit, we just die. 826 826 * No need to calculate anything else now. 827 827 */ 828 + pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", 829 + tsk->comm, task_pid_nr(tsk)); 828 830 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); 829 831 return; 830 832 } ··· 838 836 soft += USEC_PER_SEC; 839 837 sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; 840 838 } 841 - printk(KERN_INFO 842 - "RT Watchdog Timeout: %s[%d]\n", 839 + pr_info("RT Watchdog Timeout (soft): %s[%d]\n", 843 840 tsk->comm, task_pid_nr(tsk)); 844 841 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); 845 842 } ··· 936 935 * At the hard limit, we just die. 937 936 * No need to calculate anything else now. 938 937 */ 938 + pr_info("RT Watchdog Timeout (hard): %s[%d]\n", 939 + tsk->comm, task_pid_nr(tsk)); 939 940 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); 940 941 return; 941 942 } ··· 945 942 /* 946 943 * At the soft limit, send a SIGXCPU every second. 
947 944 */ 945 + pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", 946 + tsk->comm, task_pid_nr(tsk)); 948 947 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); 949 948 if (soft < hard) { 950 949 soft++; ··· 1219 1214 } 1220 1215 1221 1216 static int do_cpu_nanosleep(const clockid_t which_clock, int flags, 1222 - struct timespec *rqtp, struct itimerspec *it) 1217 + struct timespec64 *rqtp, struct itimerspec64 *it) 1223 1218 { 1224 1219 struct k_itimer timer; 1225 1220 int error; ··· 1234 1229 error = posix_cpu_timer_create(&timer); 1235 1230 timer.it_process = current; 1236 1231 if (!error) { 1237 - static struct itimerspec zero_it; 1232 + static struct itimerspec64 zero_it; 1238 1233 1239 1234 memset(it, 0, sizeof *it); 1240 1235 it->it_value = *rqtp; ··· 1269 1264 /* 1270 1265 * We were interrupted by a signal. 1271 1266 */ 1272 - *rqtp = ns_to_timespec(timer.it.cpu.expires); 1267 + *rqtp = ns_to_timespec64(timer.it.cpu.expires); 1273 1268 error = posix_cpu_timer_set(&timer, 0, &zero_it, it); 1274 1269 if (!error) { 1275 1270 /* ··· 1306 1301 static long posix_cpu_nsleep_restart(struct restart_block *restart_block); 1307 1302 1308 1303 static int posix_cpu_nsleep(const clockid_t which_clock, int flags, 1309 - struct timespec *rqtp, struct timespec __user *rmtp) 1304 + struct timespec64 *rqtp, struct timespec __user *rmtp) 1310 1305 { 1311 1306 struct restart_block *restart_block = &current->restart_block; 1312 - struct itimerspec it; 1307 + struct itimerspec64 it; 1308 + struct timespec ts; 1313 1309 int error; 1314 1310 1315 1311 /* ··· 1330 1324 /* 1331 1325 * Report back to the user the time still remaining. 
1332 1326 */ 1333 - if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) 1327 + ts = timespec64_to_timespec(it.it_value); 1328 + if (rmtp && copy_to_user(rmtp, &ts, sizeof(*rmtp))) 1334 1329 return -EFAULT; 1335 1330 1336 1331 restart_block->fn = posix_cpu_nsleep_restart; 1337 1332 restart_block->nanosleep.clockid = which_clock; 1338 1333 restart_block->nanosleep.rmtp = rmtp; 1339 - restart_block->nanosleep.expires = timespec_to_ns(rqtp); 1334 + restart_block->nanosleep.expires = timespec64_to_ns(rqtp); 1340 1335 } 1341 1336 return error; 1342 1337 } ··· 1345 1338 static long posix_cpu_nsleep_restart(struct restart_block *restart_block) 1346 1339 { 1347 1340 clockid_t which_clock = restart_block->nanosleep.clockid; 1348 - struct timespec t; 1349 - struct itimerspec it; 1341 + struct itimerspec64 it; 1342 + struct timespec64 t; 1343 + struct timespec tmp; 1350 1344 int error; 1351 1345 1352 - t = ns_to_timespec(restart_block->nanosleep.expires); 1346 + t = ns_to_timespec64(restart_block->nanosleep.expires); 1353 1347 1354 1348 error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it); 1355 1349 ··· 1359 1351 /* 1360 1352 * Report back to the user the time still remaining. 
1361 1353 */ 1362 - if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) 1354 + tmp = timespec64_to_timespec(it.it_value); 1355 + if (rmtp && copy_to_user(rmtp, &tmp, sizeof(*rmtp))) 1363 1356 return -EFAULT; 1364 1357 1365 - restart_block->nanosleep.expires = timespec_to_ns(&t); 1358 + restart_block->nanosleep.expires = timespec64_to_ns(&t); 1366 1359 } 1367 1360 return error; 1368 1361 ··· 1373 1364 #define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED) 1374 1365 1375 1366 static int process_cpu_clock_getres(const clockid_t which_clock, 1376 - struct timespec *tp) 1367 + struct timespec64 *tp) 1377 1368 { 1378 1369 return posix_cpu_clock_getres(PROCESS_CLOCK, tp); 1379 1370 } 1380 1371 static int process_cpu_clock_get(const clockid_t which_clock, 1381 - struct timespec *tp) 1372 + struct timespec64 *tp) 1382 1373 { 1383 1374 return posix_cpu_clock_get(PROCESS_CLOCK, tp); 1384 1375 } ··· 1388 1379 return posix_cpu_timer_create(timer); 1389 1380 } 1390 1381 static int process_cpu_nsleep(const clockid_t which_clock, int flags, 1391 - struct timespec *rqtp, 1382 + struct timespec64 *rqtp, 1392 1383 struct timespec __user *rmtp) 1393 1384 { 1394 1385 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp); ··· 1398 1389 return -EINVAL; 1399 1390 } 1400 1391 static int thread_cpu_clock_getres(const clockid_t which_clock, 1401 - struct timespec *tp) 1392 + struct timespec64 *tp) 1402 1393 { 1403 1394 return posix_cpu_clock_getres(THREAD_CLOCK, tp); 1404 1395 } 1405 1396 static int thread_cpu_clock_get(const clockid_t which_clock, 1406 - struct timespec *tp) 1397 + struct timespec64 *tp) 1407 1398 { 1408 1399 return posix_cpu_clock_get(THREAD_CLOCK, tp); 1409 1400 }
+14 -6
kernel/time/posix-stubs.c
··· 49 49 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, 50 50 const struct timespec __user *, tp) 51 51 { 52 + struct timespec64 new_tp64; 52 53 struct timespec new_tp; 53 54 54 55 if (which_clock != CLOCK_REALTIME) 55 56 return -EINVAL; 56 57 if (copy_from_user(&new_tp, tp, sizeof (*tp))) 57 58 return -EFAULT; 58 - return do_sys_settimeofday(&new_tp, NULL); 59 + 60 + new_tp64 = timespec_to_timespec64(new_tp); 61 + return do_sys_settimeofday64(&new_tp64, NULL); 59 62 } 60 63 61 64 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, 62 65 struct timespec __user *,tp) 63 66 { 67 + struct timespec64 kernel_tp64; 64 68 struct timespec kernel_tp; 65 69 66 70 switch (which_clock) { 67 - case CLOCK_REALTIME: ktime_get_real_ts(&kernel_tp); break; 68 - case CLOCK_MONOTONIC: ktime_get_ts(&kernel_tp); break; 69 - case CLOCK_BOOTTIME: get_monotonic_boottime(&kernel_tp); break; 71 + case CLOCK_REALTIME: ktime_get_real_ts64(&kernel_tp64); break; 72 + case CLOCK_MONOTONIC: ktime_get_ts64(&kernel_tp64); break; 73 + case CLOCK_BOOTTIME: get_monotonic_boottime64(&kernel_tp64); break; 70 74 default: return -EINVAL; 71 75 } 76 + 77 + kernel_tp = timespec64_to_timespec(kernel_tp64); 72 78 if (copy_to_user(tp, &kernel_tp, sizeof (kernel_tp))) 73 79 return -EFAULT; 74 80 return 0; ··· 103 97 const struct timespec __user *, rqtp, 104 98 struct timespec __user *, rmtp) 105 99 { 100 + struct timespec64 t64; 106 101 struct timespec t; 107 102 108 103 switch (which_clock) { ··· 112 105 case CLOCK_BOOTTIME: 113 106 if (copy_from_user(&t, rqtp, sizeof (struct timespec))) 114 107 return -EFAULT; 115 - if (!timespec_valid(&t)) 108 + t64 = timespec_to_timespec64(t); 109 + if (!timespec64_valid(&t64)) 116 110 return -EINVAL; 117 - return hrtimer_nanosleep(&t, rmtp, flags & TIMER_ABSTIME ? 111 + return hrtimer_nanosleep(&t64, rmtp, flags & TIMER_ABSTIME ? 118 112 HRTIMER_MODE_ABS : HRTIMER_MODE_REL, 119 113 which_clock); 120 114 default:
+55 -42
kernel/time/posix-timers.c
··· 130 130 /* 131 131 * These ones are defined below. 132 132 */ 133 - static int common_nsleep(const clockid_t, int flags, struct timespec *t, 133 + static int common_nsleep(const clockid_t, int flags, struct timespec64 *t, 134 134 struct timespec __user *rmtp); 135 135 static int common_timer_create(struct k_itimer *new_timer); 136 - static void common_timer_get(struct k_itimer *, struct itimerspec *); 136 + static void common_timer_get(struct k_itimer *, struct itimerspec64 *); 137 137 static int common_timer_set(struct k_itimer *, int, 138 - struct itimerspec *, struct itimerspec *); 138 + struct itimerspec64 *, struct itimerspec64 *); 139 139 static int common_timer_del(struct k_itimer *timer); 140 140 141 141 static enum hrtimer_restart posix_timer_fn(struct hrtimer *data); ··· 204 204 } 205 205 206 206 /* Get clock_realtime */ 207 - static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp) 207 + static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp) 208 208 { 209 - ktime_get_real_ts(tp); 209 + ktime_get_real_ts64(tp); 210 210 return 0; 211 211 } 212 212 213 213 /* Set clock_realtime */ 214 214 static int posix_clock_realtime_set(const clockid_t which_clock, 215 - const struct timespec *tp) 215 + const struct timespec64 *tp) 216 216 { 217 - return do_sys_settimeofday(tp, NULL); 217 + return do_sys_settimeofday64(tp, NULL); 218 218 } 219 219 220 220 static int posix_clock_realtime_adj(const clockid_t which_clock, ··· 226 226 /* 227 227 * Get monotonic time for posix timers 228 228 */ 229 - static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp) 229 + static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp) 230 230 { 231 - ktime_get_ts(tp); 231 + ktime_get_ts64(tp); 232 232 return 0; 233 233 } 234 234 235 235 /* 236 236 * Get monotonic-raw time for posix timers 237 237 */ 238 - static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp) 238 + static int 
posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) 239 239 { 240 - getrawmonotonic(tp); 240 + getrawmonotonic64(tp); 241 241 return 0; 242 242 } 243 243 244 244 245 - static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp) 245 + static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) 246 246 { 247 - *tp = current_kernel_time(); 247 + *tp = current_kernel_time64(); 248 248 return 0; 249 249 } 250 250 251 251 static int posix_get_monotonic_coarse(clockid_t which_clock, 252 - struct timespec *tp) 252 + struct timespec64 *tp) 253 253 { 254 - *tp = get_monotonic_coarse(); 254 + *tp = get_monotonic_coarse64(); 255 255 return 0; 256 256 } 257 257 258 - static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp) 258 + static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) 259 259 { 260 - *tp = ktime_to_timespec(KTIME_LOW_RES); 260 + *tp = ktime_to_timespec64(KTIME_LOW_RES); 261 261 return 0; 262 262 } 263 263 264 - static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp) 264 + static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) 265 265 { 266 - get_monotonic_boottime(tp); 266 + get_monotonic_boottime64(tp); 267 267 return 0; 268 268 } 269 269 270 - static int posix_get_tai(clockid_t which_clock, struct timespec *tp) 270 + static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) 271 271 { 272 - timekeeping_clocktai(tp); 272 + timekeeping_clocktai64(tp); 273 273 return 0; 274 274 } 275 275 276 - static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp) 276 + static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) 277 277 { 278 278 tp->tv_sec = 0; 279 279 tp->tv_nsec = hrtimer_resolution; ··· 734 734 * report. 
735 735 */ 736 736 static void 737 - common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) 737 + common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) 738 738 { 739 739 ktime_t now, remaining, iv; 740 740 struct hrtimer *timer = &timr->it.real.timer; 741 741 742 - memset(cur_setting, 0, sizeof(struct itimerspec)); 742 + memset(cur_setting, 0, sizeof(*cur_setting)); 743 743 744 744 iv = timr->it.real.interval; 745 745 746 746 /* interval timer ? */ 747 747 if (iv) 748 - cur_setting->it_interval = ktime_to_timespec(iv); 748 + cur_setting->it_interval = ktime_to_timespec64(iv); 749 749 else if (!hrtimer_active(timer) && 750 750 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) 751 751 return; ··· 771 771 if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) 772 772 cur_setting->it_value.tv_nsec = 1; 773 773 } else 774 - cur_setting->it_value = ktime_to_timespec(remaining); 774 + cur_setting->it_value = ktime_to_timespec64(remaining); 775 775 } 776 776 777 777 /* Get the time remaining on a POSIX.1b interval timer. */ 778 778 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, 779 779 struct itimerspec __user *, setting) 780 780 { 781 + struct itimerspec64 cur_setting64; 781 782 struct itimerspec cur_setting; 782 783 struct k_itimer *timr; 783 784 struct k_clock *kc; ··· 793 792 if (WARN_ON_ONCE(!kc || !kc->timer_get)) 794 793 ret = -EINVAL; 795 794 else 796 - kc->timer_get(timr, &cur_setting); 795 + kc->timer_get(timr, &cur_setting64); 797 796 798 797 unlock_timer(timr, flags); 799 798 799 + cur_setting = itimerspec64_to_itimerspec(&cur_setting64); 800 800 if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting))) 801 801 return -EFAULT; 802 802 ··· 833 831 /* timr->it_lock is taken. 
*/ 834 832 static int 835 833 common_timer_set(struct k_itimer *timr, int flags, 836 - struct itimerspec *new_setting, struct itimerspec *old_setting) 834 + struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) 837 835 { 838 836 struct hrtimer *timer = &timr->it.real.timer; 839 837 enum hrtimer_mode mode; ··· 862 860 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); 863 861 timr->it.real.timer.function = posix_timer_fn; 864 862 865 - hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value)); 863 + hrtimer_set_expires(timer, timespec64_to_ktime(new_setting->it_value)); 866 864 867 865 /* Convert interval */ 868 - timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); 866 + timr->it.real.interval = timespec64_to_ktime(new_setting->it_interval); 869 867 870 868 /* SIGEV_NONE timers are not queued ! See common_timer_get */ 871 869 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { ··· 885 883 const struct itimerspec __user *, new_setting, 886 884 struct itimerspec __user *, old_setting) 887 885 { 888 - struct k_itimer *timr; 886 + struct itimerspec64 new_spec64, old_spec64; 887 + struct itimerspec64 *rtn = old_setting ? &old_spec64 : NULL; 889 888 struct itimerspec new_spec, old_spec; 890 - int error = 0; 889 + struct k_itimer *timr; 891 890 unsigned long flag; 892 - struct itimerspec *rtn = old_setting ? 
&old_spec : NULL; 893 891 struct k_clock *kc; 892 + int error = 0; 894 893 895 894 if (!new_setting) 896 895 return -EINVAL; 897 896 898 897 if (copy_from_user(&new_spec, new_setting, sizeof (new_spec))) 899 898 return -EFAULT; 899 + new_spec64 = itimerspec_to_itimerspec64(&new_spec); 900 900 901 - if (!timespec_valid(&new_spec.it_interval) || 902 - !timespec_valid(&new_spec.it_value)) 901 + if (!timespec64_valid(&new_spec64.it_interval) || 902 + !timespec64_valid(&new_spec64.it_value)) 903 903 return -EINVAL; 904 904 retry: 905 905 timr = lock_timer(timer_id, &flag); ··· 912 908 if (WARN_ON_ONCE(!kc || !kc->timer_set)) 913 909 error = -EINVAL; 914 910 else 915 - error = kc->timer_set(timr, flags, &new_spec, rtn); 911 + error = kc->timer_set(timr, flags, &new_spec64, rtn); 916 912 917 913 unlock_timer(timr, flag); 918 914 if (error == TIMER_RETRY) { ··· 920 916 goto retry; 921 917 } 922 918 919 + old_spec = itimerspec64_to_itimerspec(&old_spec64); 923 920 if (old_setting && !error && 924 921 copy_to_user(old_setting, &old_spec, sizeof (old_spec))) 925 922 error = -EFAULT; ··· 1019 1014 const struct timespec __user *, tp) 1020 1015 { 1021 1016 struct k_clock *kc = clockid_to_kclock(which_clock); 1017 + struct timespec64 new_tp64; 1022 1018 struct timespec new_tp; 1023 1019 1024 1020 if (!kc || !kc->clock_set) ··· 1027 1021 1028 1022 if (copy_from_user(&new_tp, tp, sizeof (*tp))) 1029 1023 return -EFAULT; 1024 + new_tp64 = timespec_to_timespec64(new_tp); 1030 1025 1031 - return kc->clock_set(which_clock, &new_tp); 1026 + return kc->clock_set(which_clock, &new_tp64); 1032 1027 } 1033 1028 1034 1029 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, 1035 1030 struct timespec __user *,tp) 1036 1031 { 1037 1032 struct k_clock *kc = clockid_to_kclock(which_clock); 1033 + struct timespec64 kernel_tp64; 1038 1034 struct timespec kernel_tp; 1039 1035 int error; 1040 1036 1041 1037 if (!kc) 1042 1038 return -EINVAL; 1043 1039 1044 - error = 
kc->clock_get(which_clock, &kernel_tp); 1040 + error = kc->clock_get(which_clock, &kernel_tp64); 1041 + kernel_tp = timespec64_to_timespec(kernel_tp64); 1045 1042 1046 1043 if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp))) 1047 1044 error = -EFAULT; ··· 1079 1070 struct timespec __user *, tp) 1080 1071 { 1081 1072 struct k_clock *kc = clockid_to_kclock(which_clock); 1073 + struct timespec64 rtn_tp64; 1082 1074 struct timespec rtn_tp; 1083 1075 int error; 1084 1076 1085 1077 if (!kc) 1086 1078 return -EINVAL; 1087 1079 1088 - error = kc->clock_getres(which_clock, &rtn_tp); 1080 + error = kc->clock_getres(which_clock, &rtn_tp64); 1081 + rtn_tp = timespec64_to_timespec(rtn_tp64); 1089 1082 1090 1083 if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) 1091 1084 error = -EFAULT; ··· 1099 1088 * nanosleep for monotonic and realtime clocks 1100 1089 */ 1101 1090 static int common_nsleep(const clockid_t which_clock, int flags, 1102 - struct timespec *tsave, struct timespec __user *rmtp) 1091 + struct timespec64 *tsave, struct timespec __user *rmtp) 1103 1092 { 1104 1093 return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ? 1105 1094 HRTIMER_MODE_ABS : HRTIMER_MODE_REL, ··· 1111 1100 struct timespec __user *, rmtp) 1112 1101 { 1113 1102 struct k_clock *kc = clockid_to_kclock(which_clock); 1103 + struct timespec64 t64; 1114 1104 struct timespec t; 1115 1105 1116 1106 if (!kc) ··· 1122 1110 if (copy_from_user(&t, rqtp, sizeof (struct timespec))) 1123 1111 return -EFAULT; 1124 1112 1125 - if (!timespec_valid(&t)) 1113 + t64 = timespec_to_timespec64(t); 1114 + if (!timespec64_valid(&t64)) 1126 1115 return -EINVAL; 1127 1116 1128 - return kc->nsleep(which_clock, flags, &t, rmtp); 1117 + return kc->nsleep(which_clock, flags, &t64, rmtp); 1129 1118 } 1130 1119 1131 1120 /*
+5
kernel/time/sched_clock.c
··· 206 206 207 207 update_clock_read_data(&rd); 208 208 209 + if (sched_clock_timer.function != NULL) { 210 + /* update timeout for clock wrap */ 211 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); 212 + } 213 + 209 214 r = rate; 210 215 if (r >= 4000000) { 211 216 r /= 1000000;
+2 -2
kernel/time/time.c
··· 193 193 SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, 194 194 struct timezone __user *, tz) 195 195 { 196 + struct timespec64 new_ts; 196 197 struct timeval user_tv; 197 - struct timespec new_ts; 198 198 struct timezone new_tz; 199 199 200 200 if (tv) { ··· 212 212 return -EFAULT; 213 213 } 214 214 215 - return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); 215 + return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL); 216 216 } 217 217 218 218 SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
+1 -2
kernel/time/timekeeping.c
··· 996 996 return 0; 997 997 998 998 /* Interpolate shortest distance from beginning or end of history */ 999 - interp_forward = partial_history_cycles > total_history_cycles/2 ? 1000 - true : false; 999 + interp_forward = partial_history_cycles > total_history_cycles / 2; 1001 1000 partial_history_cycles = interp_forward ? 1002 1001 total_history_cycles - partial_history_cycles : 1003 1002 partial_history_cycles;
+1 -1
kernel/time/timer.c
··· 241 241 int ret; 242 242 243 243 mutex_lock(&mutex); 244 - ret = proc_dointvec(table, write, buffer, lenp, ppos); 244 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 245 245 if (!ret && write) 246 246 timers_update_migration(false); 247 247 mutex_unlock(&mutex);
+6
kernel/time/timer_list.c
··· 16 16 #include <linux/sched.h> 17 17 #include <linux/seq_file.h> 18 18 #include <linux/kallsyms.h> 19 + #include <linux/nmi.h> 19 20 20 21 #include <linux/uaccess.h> 21 22 ··· 87 86 88 87 next_one: 89 88 i = 0; 89 + 90 + touch_nmi_watchdog(); 91 + 90 92 raw_spin_lock_irqsave(&base->cpu_base->lock, flags); 91 93 92 94 curr = timerqueue_getnext(&base->active); ··· 200 196 print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) 201 197 { 202 198 struct clock_event_device *dev = td->evtdev; 199 + 200 + touch_nmi_watchdog(); 203 201 204 202 SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); 205 203 if (cpu < 0)