Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers/clockevents-next' of git://git.linaro.org/people/dlezcano/clockevents into timers/core

* Support for memory mapped arch_timers
* Trivial fixes to the moxart timer code
* Documentation updates

Trivial conflicts in drivers/clocksource/arm_arch_timer.c. Fixed up
the newly added __cpuinit annotations as well.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+464 -104
+56 -3
Documentation/devicetree/bindings/arm/arch_timer.txt
··· 1 1 * ARM architected timer 2 2 3 - ARM cores may have a per-core architected timer, which provides per-cpu timers. 3 + ARM cores may have a per-core architected timer, which provides per-cpu timers, 4 + or a memory mapped architected timer, which provides up to 8 frames with a 5 + physical and optional virtual timer per frame. 4 6 5 - The timer is attached to a GIC to deliver its per-processor interrupts. 7 + The per-core architected timer is attached to a GIC to deliver its 8 + per-processor interrupts via PPIs. The memory mapped timer is attached to a GIC 9 + to deliver its interrupts via SPIs. 6 10 7 - ** Timer node properties: 11 + ** CP15 Timer node properties: 8 12 9 13 - compatible : Should at least contain one of 10 14 "arm,armv7-timer" ··· 29 25 <1 11 0xf08>, 30 26 <1 10 0xf08>; 31 27 clock-frequency = <100000000>; 28 + }; 29 + 30 + ** Memory mapped timer node properties: 31 + 32 + - compatible : Should at least contain "arm,armv7-timer-mem". 33 + 34 + - clock-frequency : The frequency of the main counter, in Hz. Optional. 35 + 36 + - reg : The control frame base address. 37 + 38 + Note that #address-cells, #size-cells, and ranges shall be present to ensure 39 + the CPU can address a frame's registers. 40 + 41 + A timer node has up to 8 frame sub-nodes, each with the following properties: 42 + 43 + - frame-number: 0 to 7. 44 + 45 + - interrupts : Interrupt list for physical and virtual timers in that order. 46 + The virtual timer interrupt is optional. 47 + 48 + - reg : The first and second view base addresses in that order. The second view 49 + base address is optional. 50 + 51 + - status : "disabled" indicates the frame is not available for use. Optional. 
52 + 53 + Example: 54 + 55 + timer@f0000000 { 56 + compatible = "arm,armv7-timer-mem"; 57 + #address-cells = <1>; 58 + #size-cells = <1>; 59 + ranges; 60 + reg = <0xf0000000 0x1000>; 61 + clock-frequency = <50000000>; 62 + 63 + frame@f0001000 { 64 + frame-number = <0>; 65 + interrupts = <0 13 0x8>, 66 + <0 14 0x8>; 67 + reg = <0xf0001000 0x1000>, 68 + <0xf0002000 0x1000>; 69 + }; 70 + 71 + frame@f0003000 { 72 + frame-number = <1>; 73 + interrupts = <0 15 0x8>; 74 + reg = <0xf0003000 0x1000>; 75 + status = "disabled"; 76 + }; 32 77 };
+3 -3
Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt
··· 2 2 3 3 Required properties: 4 4 5 - - compatible : Should be "moxa,moxart-timer" 5 + - compatible : Must be "moxa,moxart-timer" 6 6 - reg : Should contain registers location and length 7 7 - interrupts : Should contain the timer interrupt number 8 - - clocks : Should contain phandle for APB clock "clkapb" 8 + - clocks : Should contain phandle for the clock that drives the counter 9 9 10 10 Example: 11 11 ··· 13 13 compatible = "moxa,moxart-timer"; 14 14 reg = <0x98400000 0x42>; 15 15 interrupts = <19 1>; 16 - clocks = <&clkapb>; 16 + clocks = <&coreclk>; 17 17 };
+6 -8
arch/arm/include/asm/arch_timer.h
··· 17 17 * nicely work out which register we want, and chuck away the rest of 18 18 * the code. At least it does so with a recent GCC (4.6.3). 19 19 */ 20 - static inline void arch_timer_reg_write(const int access, const int reg, u32 val) 20 + static __always_inline 21 + void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) 21 22 { 22 23 if (access == ARCH_TIMER_PHYS_ACCESS) { 23 24 switch (reg) { ··· 29 28 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); 30 29 break; 31 30 } 32 - } 33 - 34 - if (access == ARCH_TIMER_VIRT_ACCESS) { 31 + } else if (access == ARCH_TIMER_VIRT_ACCESS) { 35 32 switch (reg) { 36 33 case ARCH_TIMER_REG_CTRL: 37 34 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); ··· 43 44 isb(); 44 45 } 45 46 46 - static inline u32 arch_timer_reg_read(const int access, const int reg) 47 + static __always_inline 48 + u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) 47 49 { 48 50 u32 val = 0; 49 51 ··· 57 57 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); 58 58 break; 59 59 } 60 - } 61 - 62 - if (access == ARCH_TIMER_VIRT_ACCESS) { 60 + } else if (access == ARCH_TIMER_VIRT_ACCESS) { 63 61 switch (reg) { 64 62 case ARCH_TIMER_REG_CTRL: 65 63 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+9 -14
arch/arm64/include/asm/arch_timer.h
··· 26 26 27 27 #include <clocksource/arm_arch_timer.h> 28 28 29 - static inline void arch_timer_reg_write(int access, int reg, u32 val) 29 + /* 30 + * These register accessors are marked inline so the compiler can 31 + * nicely work out which register we want, and chuck away the rest of 32 + * the code. 33 + */ 34 + static __always_inline 35 + void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) 30 36 { 31 37 if (access == ARCH_TIMER_PHYS_ACCESS) { 32 38 switch (reg) { ··· 42 36 case ARCH_TIMER_REG_TVAL: 43 37 asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); 44 38 break; 45 - default: 46 - BUILD_BUG(); 47 39 } 48 40 } else if (access == ARCH_TIMER_VIRT_ACCESS) { 49 41 switch (reg) { ··· 51 47 case ARCH_TIMER_REG_TVAL: 52 48 asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); 53 49 break; 54 - default: 55 - BUILD_BUG(); 56 50 } 57 - } else { 58 - BUILD_BUG(); 59 51 } 60 52 61 53 isb(); 62 54 } 63 55 64 - static inline u32 arch_timer_reg_read(int access, int reg) 56 + static __always_inline 57 + u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) 65 58 { 66 59 u32 val; 67 60 ··· 70 69 case ARCH_TIMER_REG_TVAL: 71 70 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); 72 71 break; 73 - default: 74 - BUILD_BUG(); 75 72 } 76 73 } else if (access == ARCH_TIMER_VIRT_ACCESS) { 77 74 switch (reg) { ··· 79 80 case ARCH_TIMER_REG_TVAL: 80 81 asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); 81 82 break; 82 - default: 83 - BUILD_BUG(); 84 83 } 85 - } else { 86 - BUILD_BUG(); 87 84 } 88 85 89 86 return val;
+382 -73
drivers/clocksource/arm_arch_timer.c
··· 16 16 #include <linux/clockchips.h> 17 17 #include <linux/interrupt.h> 18 18 #include <linux/of_irq.h> 19 + #include <linux/of_address.h> 19 20 #include <linux/io.h> 21 + #include <linux/slab.h> 20 22 21 23 #include <asm/arch_timer.h> 22 24 #include <asm/virt.h> 23 25 24 26 #include <clocksource/arm_arch_timer.h> 27 + 28 + #define CNTTIDR 0x08 29 + #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) 30 + 31 + #define CNTVCT_LO 0x08 32 + #define CNTVCT_HI 0x0c 33 + #define CNTFRQ 0x10 34 + #define CNTP_TVAL 0x28 35 + #define CNTP_CTL 0x2c 36 + #define CNTV_TVAL 0x38 37 + #define CNTV_CTL 0x3c 38 + 39 + #define ARCH_CP15_TIMER BIT(0) 40 + #define ARCH_MEM_TIMER BIT(1) 41 + static unsigned arch_timers_present __initdata; 42 + 43 + static void __iomem *arch_counter_base; 44 + 45 + struct arch_timer { 46 + void __iomem *base; 47 + struct clock_event_device evt; 48 + }; 49 + 50 + #define to_arch_timer(e) container_of(e, struct arch_timer, evt) 25 51 26 52 static u32 arch_timer_rate; 27 53 ··· 64 38 static struct clock_event_device __percpu *arch_timer_evt; 65 39 66 40 static bool arch_timer_use_virtual = true; 41 + static bool arch_timer_mem_use_virtual; 67 42 68 43 /* 69 44 * Architected system timer support. 
70 45 */ 71 46 72 - static inline irqreturn_t timer_handler(const int access, 47 + static __always_inline 48 + void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 49 + struct clock_event_device *clk) 50 + { 51 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 52 + struct arch_timer *timer = to_arch_timer(clk); 53 + switch (reg) { 54 + case ARCH_TIMER_REG_CTRL: 55 + writel_relaxed(val, timer->base + CNTP_CTL); 56 + break; 57 + case ARCH_TIMER_REG_TVAL: 58 + writel_relaxed(val, timer->base + CNTP_TVAL); 59 + break; 60 + } 61 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 62 + struct arch_timer *timer = to_arch_timer(clk); 63 + switch (reg) { 64 + case ARCH_TIMER_REG_CTRL: 65 + writel_relaxed(val, timer->base + CNTV_CTL); 66 + break; 67 + case ARCH_TIMER_REG_TVAL: 68 + writel_relaxed(val, timer->base + CNTV_TVAL); 69 + break; 70 + } 71 + } else { 72 + arch_timer_reg_write_cp15(access, reg, val); 73 + } 74 + } 75 + 76 + static __always_inline 77 + u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 78 + struct clock_event_device *clk) 79 + { 80 + u32 val; 81 + 82 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 83 + struct arch_timer *timer = to_arch_timer(clk); 84 + switch (reg) { 85 + case ARCH_TIMER_REG_CTRL: 86 + val = readl_relaxed(timer->base + CNTP_CTL); 87 + break; 88 + case ARCH_TIMER_REG_TVAL: 89 + val = readl_relaxed(timer->base + CNTP_TVAL); 90 + break; 91 + } 92 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 93 + struct arch_timer *timer = to_arch_timer(clk); 94 + switch (reg) { 95 + case ARCH_TIMER_REG_CTRL: 96 + val = readl_relaxed(timer->base + CNTV_CTL); 97 + break; 98 + case ARCH_TIMER_REG_TVAL: 99 + val = readl_relaxed(timer->base + CNTV_TVAL); 100 + break; 101 + } 102 + } else { 103 + val = arch_timer_reg_read_cp15(access, reg); 104 + } 105 + 106 + return val; 107 + } 108 + 109 + static __always_inline irqreturn_t timer_handler(const int access, 73 110 struct clock_event_device *evt) 74 111 { 75 112 unsigned long 
ctrl; 76 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); 113 + 114 + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); 77 115 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 78 116 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 79 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); 117 + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); 80 118 evt->event_handler(evt); 81 119 return IRQ_HANDLED; 82 120 } ··· 162 72 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); 163 73 } 164 74 165 - static inline void timer_set_mode(const int access, int mode) 75 + static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) 76 + { 77 + struct clock_event_device *evt = dev_id; 78 + 79 + return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); 80 + } 81 + 82 + static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) 83 + { 84 + struct clock_event_device *evt = dev_id; 85 + 86 + return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); 87 + } 88 + 89 + static __always_inline void timer_set_mode(const int access, int mode, 90 + struct clock_event_device *clk) 166 91 { 167 92 unsigned long ctrl; 168 93 switch (mode) { 169 94 case CLOCK_EVT_MODE_UNUSED: 170 95 case CLOCK_EVT_MODE_SHUTDOWN: 171 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); 96 + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 172 97 ctrl &= ~ARCH_TIMER_CTRL_ENABLE; 173 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); 98 + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 174 99 break; 175 100 default: 176 101 break; ··· 195 90 static void arch_timer_set_mode_virt(enum clock_event_mode mode, 196 91 struct clock_event_device *clk) 197 92 { 198 - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); 93 + timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); 199 94 } 200 95 201 96 static void arch_timer_set_mode_phys(enum clock_event_mode mode, 202 97 struct clock_event_device *clk) 203 98 { 204 - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); 99 + 
timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); 205 100 } 206 101 207 - static inline void set_next_event(const int access, unsigned long evt) 102 + static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, 103 + struct clock_event_device *clk) 104 + { 105 + timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); 106 + } 107 + 108 + static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, 109 + struct clock_event_device *clk) 110 + { 111 + timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); 112 + } 113 + 114 + static __always_inline void set_next_event(const int access, unsigned long evt, 115 + struct clock_event_device *clk) 208 116 { 209 117 unsigned long ctrl; 210 - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); 118 + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 211 119 ctrl |= ARCH_TIMER_CTRL_ENABLE; 212 120 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 213 - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); 214 - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); 121 + arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); 122 + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 215 123 } 216 124 217 125 static int arch_timer_set_next_event_virt(unsigned long evt, 218 - struct clock_event_device *unused) 126 + struct clock_event_device *clk) 219 127 { 220 - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); 128 + set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); 221 129 return 0; 222 130 } 223 131 224 132 static int arch_timer_set_next_event_phys(unsigned long evt, 225 - struct clock_event_device *unused) 133 + struct clock_event_device *clk) 226 134 { 227 - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); 135 + set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 228 136 return 0; 137 + } 138 + 139 + static int arch_timer_set_next_event_virt_mem(unsigned long evt, 140 + struct clock_event_device *clk) 141 + { 142 + set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); 143 + return 0; 144 + } 145 + 146 
+ static int arch_timer_set_next_event_phys_mem(unsigned long evt, 147 + struct clock_event_device *clk) 148 + { 149 + set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); 150 + return 0; 151 + } 152 + 153 + static void __arch_timer_setup(unsigned type, 154 + struct clock_event_device *clk) 155 + { 156 + clk->features = CLOCK_EVT_FEAT_ONESHOT; 157 + 158 + if (type == ARCH_CP15_TIMER) { 159 + clk->features |= CLOCK_EVT_FEAT_C3STOP; 160 + clk->name = "arch_sys_timer"; 161 + clk->rating = 450; 162 + clk->cpumask = cpumask_of(smp_processor_id()); 163 + if (arch_timer_use_virtual) { 164 + clk->irq = arch_timer_ppi[VIRT_PPI]; 165 + clk->set_mode = arch_timer_set_mode_virt; 166 + clk->set_next_event = arch_timer_set_next_event_virt; 167 + } else { 168 + clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; 169 + clk->set_mode = arch_timer_set_mode_phys; 170 + clk->set_next_event = arch_timer_set_next_event_phys; 171 + } 172 + } else { 173 + clk->name = "arch_mem_timer"; 174 + clk->rating = 400; 175 + clk->cpumask = cpu_all_mask; 176 + if (arch_timer_mem_use_virtual) { 177 + clk->set_mode = arch_timer_set_mode_virt_mem; 178 + clk->set_next_event = 179 + arch_timer_set_next_event_virt_mem; 180 + } else { 181 + clk->set_mode = arch_timer_set_mode_phys_mem; 182 + clk->set_next_event = 183 + arch_timer_set_next_event_phys_mem; 184 + } 185 + } 186 + 187 + clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); 188 + 189 + clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); 229 190 } 230 191 231 192 static int arch_timer_setup(struct clock_event_device *clk) 232 193 { 233 - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; 234 - clk->name = "arch_sys_timer"; 235 - clk->rating = 450; 236 - if (arch_timer_use_virtual) { 237 - clk->irq = arch_timer_ppi[VIRT_PPI]; 238 - clk->set_mode = arch_timer_set_mode_virt; 239 - clk->set_next_event = arch_timer_set_next_event_virt; 240 - } else { 241 - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; 242 - clk->set_mode = 
arch_timer_set_mode_phys; 243 - clk->set_next_event = arch_timer_set_next_event_phys; 244 - } 245 - 246 - clk->cpumask = cpumask_of(smp_processor_id()); 247 - 248 - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); 249 - 250 - clockevents_config_and_register(clk, arch_timer_rate, 251 - 0xf, 0x7fffffff); 194 + __arch_timer_setup(ARCH_CP15_TIMER, clk); 252 195 253 196 if (arch_timer_use_virtual) 254 197 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); ··· 311 158 return 0; 312 159 } 313 160 314 - static int arch_timer_available(void) 161 + static void 162 + arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) 315 163 { 316 - u32 freq; 164 + /* Who has more than one independent system counter? */ 165 + if (arch_timer_rate) 166 + return; 317 167 318 - if (arch_timer_rate == 0) { 319 - freq = arch_timer_get_cntfrq(); 320 - 321 - /* Check the timer frequency. */ 322 - if (freq == 0) { 323 - pr_warn("Architected timer frequency not available\n"); 324 - return -EINVAL; 325 - } 326 - 327 - arch_timer_rate = freq; 168 + /* Try to determine the frequency from the device tree or CNTFRQ */ 169 + if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { 170 + if (cntbase) 171 + arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); 172 + else 173 + arch_timer_rate = arch_timer_get_cntfrq(); 328 174 } 329 175 330 - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", 176 + /* Check the timer frequency. */ 177 + if (arch_timer_rate == 0) 178 + pr_warn("Architected timer frequency not available\n"); 179 + } 180 + 181 + static void arch_timer_banner(unsigned type) 182 + { 183 + pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 184 + type & ARCH_CP15_TIMER ? "cp15" : "", 185 + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "", 186 + type & ARCH_MEM_TIMER ? "mmio" : "", 331 187 (unsigned long)arch_timer_rate / 1000000, 332 188 (unsigned long)(arch_timer_rate / 10000) % 100, 333 - arch_timer_use_virtual ? 
"virt" : "phys"); 334 - return 0; 189 + type & ARCH_CP15_TIMER ? 190 + arch_timer_use_virtual ? "virt" : "phys" : 191 + "", 192 + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "", 193 + type & ARCH_MEM_TIMER ? 194 + arch_timer_mem_use_virtual ? "virt" : "phys" : 195 + ""); 335 196 } 336 197 337 198 u32 arch_timer_get_rate(void) ··· 353 186 return arch_timer_rate; 354 187 } 355 188 356 - u64 arch_timer_read_counter(void) 189 + static u64 arch_counter_get_cntvct_mem(void) 357 190 { 358 - return arch_counter_get_cntvct(); 191 + u32 vct_lo, vct_hi, tmp_hi; 192 + 193 + do { 194 + vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 195 + vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); 196 + tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 197 + } while (vct_hi != tmp_hi); 198 + 199 + return ((u64) vct_hi << 32) | vct_lo; 359 200 } 201 + 202 + /* 203 + * Default to cp15 based access because arm64 uses this function for 204 + * sched_clock() before DT is probed and the cp15 method is guaranteed 205 + * to exist on arm64. arm doesn't use this before DT is probed so even 206 + * if we don't have the cp15 accessors we won't have a problem. 
207 + */ 208 + u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 360 209 361 210 static cycle_t arch_counter_read(struct clocksource *cs) 362 211 { 363 - return arch_counter_get_cntvct(); 212 + return arch_timer_read_counter(); 364 213 } 365 214 366 215 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) 367 216 { 368 - return arch_counter_get_cntvct(); 217 + return arch_timer_read_counter(); 369 218 } 370 219 371 220 static struct clocksource clocksource_counter = { ··· 402 219 struct timecounter *arch_timer_get_timecounter(void) 403 220 { 404 221 return &timecounter; 222 + } 223 + 224 + static void __init arch_counter_register(unsigned type) 225 + { 226 + u64 start_count; 227 + 228 + /* Register the CP15 based counter if we have one */ 229 + if (type & ARCH_CP15_TIMER) 230 + arch_timer_read_counter = arch_counter_get_cntvct; 231 + else 232 + arch_timer_read_counter = arch_counter_get_cntvct_mem; 233 + 234 + start_count = arch_timer_read_counter(); 235 + clocksource_register_hz(&clocksource_counter, arch_timer_rate); 236 + cyclecounter.mult = clocksource_counter.mult; 237 + cyclecounter.shift = clocksource_counter.shift; 238 + timecounter_init(&timecounter, &cyclecounter, start_count); 405 239 } 406 240 407 241 static void arch_timer_stop(struct clock_event_device *clk) ··· 465 265 int err; 466 266 int ppi; 467 267 468 - err = arch_timer_available(); 469 - if (err) 470 - goto out; 471 - 472 268 arch_timer_evt = alloc_percpu(struct clock_event_device); 473 269 if (!arch_timer_evt) { 474 270 err = -ENOMEM; 475 271 goto out; 476 272 } 477 - 478 - clocksource_register_hz(&clocksource_counter, arch_timer_rate); 479 - cyclecounter.mult = clocksource_counter.mult; 480 - cyclecounter.shift = clocksource_counter.shift; 481 - timecounter_init(&timecounter, &cyclecounter, 482 - arch_counter_get_cntvct()); 483 273 484 274 if (arch_timer_use_virtual) { 485 275 ppi = arch_timer_ppi[VIRT_PPI]; ··· 521 331 return err; 522 332 } 523 333 334 + static 
int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) 335 + { 336 + int ret; 337 + irq_handler_t func; 338 + struct arch_timer *t; 339 + 340 + t = kzalloc(sizeof(*t), GFP_KERNEL); 341 + if (!t) 342 + return -ENOMEM; 343 + 344 + t->base = base; 345 + t->evt.irq = irq; 346 + __arch_timer_setup(ARCH_MEM_TIMER, &t->evt); 347 + 348 + if (arch_timer_mem_use_virtual) 349 + func = arch_timer_handler_virt_mem; 350 + else 351 + func = arch_timer_handler_phys_mem; 352 + 353 + ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); 354 + if (ret) { 355 + pr_err("arch_timer: Failed to request mem timer irq\n"); 356 + kfree(t); 357 + } 358 + 359 + return ret; 360 + } 361 + 362 + static const struct of_device_id arch_timer_of_match[] __initconst = { 363 + { .compatible = "arm,armv7-timer", }, 364 + { .compatible = "arm,armv8-timer", }, 365 + {}, 366 + }; 367 + 368 + static const struct of_device_id arch_timer_mem_of_match[] __initconst = { 369 + { .compatible = "arm,armv7-timer-mem", }, 370 + {}, 371 + }; 372 + 373 + static void __init arch_timer_common_init(void) 374 + { 375 + unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; 376 + 377 + /* Wait until both nodes are probed if we have two timers */ 378 + if ((arch_timers_present & mask) != mask) { 379 + if (of_find_matching_node(NULL, arch_timer_mem_of_match) && 380 + !(arch_timers_present & ARCH_MEM_TIMER)) 381 + return; 382 + if (of_find_matching_node(NULL, arch_timer_of_match) && 383 + !(arch_timers_present & ARCH_CP15_TIMER)) 384 + return; 385 + } 386 + 387 + arch_timer_banner(arch_timers_present); 388 + arch_counter_register(arch_timers_present); 389 + arch_timer_arch_init(); 390 + } 391 + 524 392 static void __init arch_timer_init(struct device_node *np) 525 393 { 526 - u32 freq; 527 394 int i; 528 395 529 - if (arch_timer_get_rate()) { 396 + if (arch_timers_present & ARCH_CP15_TIMER) { 530 397 pr_warn("arch_timer: multiple nodes in dt, skipping\n"); 531 398 return; 532 399 } 533 400 534 - /* 
Try to determine the frequency from the device tree or CNTFRQ */ 535 - if (!of_property_read_u32(np, "clock-frequency", &freq)) 536 - arch_timer_rate = freq; 537 - 401 + arch_timers_present |= ARCH_CP15_TIMER; 538 402 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) 539 403 arch_timer_ppi[i] = irq_of_parse_and_map(np, i); 540 - 541 - of_node_put(np); 404 + arch_timer_detect_rate(NULL, np); 542 405 543 406 /* 544 407 * If HYP mode is available, we know that the physical timer ··· 612 369 } 613 370 614 371 arch_timer_register(); 615 - arch_timer_arch_init(); 372 + arch_timer_common_init(); 616 373 } 617 374 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init); 618 375 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init); 376 + 377 + static void __init arch_timer_mem_init(struct device_node *np) 378 + { 379 + struct device_node *frame, *best_frame = NULL; 380 + void __iomem *cntctlbase, *base; 381 + unsigned int irq; 382 + u32 cnttidr; 383 + 384 + arch_timers_present |= ARCH_MEM_TIMER; 385 + cntctlbase = of_iomap(np, 0); 386 + if (!cntctlbase) { 387 + pr_err("arch_timer: Can't find CNTCTLBase\n"); 388 + return; 389 + } 390 + 391 + cnttidr = readl_relaxed(cntctlbase + CNTTIDR); 392 + iounmap(cntctlbase); 393 + 394 + /* 395 + * Try to find a virtual capable frame. Otherwise fall back to a 396 + * physical capable frame. 
397 + */ 398 + for_each_available_child_of_node(np, frame) { 399 + int n; 400 + 401 + if (of_property_read_u32(frame, "frame-number", &n)) { 402 + pr_err("arch_timer: Missing frame-number\n"); 403 + of_node_put(best_frame); 404 + of_node_put(frame); 405 + return; 406 + } 407 + 408 + if (cnttidr & CNTTIDR_VIRT(n)) { 409 + of_node_put(best_frame); 410 + best_frame = frame; 411 + arch_timer_mem_use_virtual = true; 412 + break; 413 + } 414 + of_node_put(best_frame); 415 + best_frame = of_node_get(frame); 416 + } 417 + 418 + base = arch_counter_base = of_iomap(best_frame, 0); 419 + if (!base) { 420 + pr_err("arch_timer: Can't map frame's registers\n"); 421 + of_node_put(best_frame); 422 + return; 423 + } 424 + 425 + if (arch_timer_mem_use_virtual) 426 + irq = irq_of_parse_and_map(best_frame, 1); 427 + else 428 + irq = irq_of_parse_and_map(best_frame, 0); 429 + of_node_put(best_frame); 430 + if (!irq) { 431 + pr_err("arch_timer: Frame missing %s irq", 432 + arch_timer_mem_use_virtual ? "virt" : "phys"); 433 + return; 434 + } 435 + 436 + arch_timer_detect_rate(base, np); 437 + arch_timer_mem_register(base, irq); 438 + arch_timer_common_init(); 439 + } 440 + CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 441 + arch_timer_mem_init);
+1
drivers/clocksource/moxart_timer.c
··· 20 20 #include <linux/of_irq.h> 21 21 #include <linux/io.h> 22 22 #include <linux/clocksource.h> 23 + #include <linux/bitops.h> 23 24 24 25 #define TIMER1_BASE 0x00 25 26 #define TIMER2_BASE 0x10
+7 -3
include/clocksource/arm_arch_timer.h
··· 23 23 #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) 24 24 #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) 25 25 26 - #define ARCH_TIMER_REG_CTRL 0 27 - #define ARCH_TIMER_REG_TVAL 1 26 + enum arch_timer_reg { 27 + ARCH_TIMER_REG_CTRL, 28 + ARCH_TIMER_REG_TVAL, 29 + }; 28 30 29 31 #define ARCH_TIMER_PHYS_ACCESS 0 30 32 #define ARCH_TIMER_VIRT_ACCESS 1 33 + #define ARCH_TIMER_MEM_PHYS_ACCESS 2 34 + #define ARCH_TIMER_MEM_VIRT_ACCESS 3 31 35 32 36 #ifdef CONFIG_ARM_ARCH_TIMER 33 37 34 38 extern u32 arch_timer_get_rate(void); 35 - extern u64 arch_timer_read_counter(void); 39 + extern u64 (*arch_timer_read_counter)(void); 36 40 extern struct timecounter *arch_timer_get_timecounter(void); 37 41 38 42 #else