Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
Snapshot at tag v4.7-rc6 — 376 lines, 9.1 kB (view raw)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Jan 2011
 *  -sched_clock( ) no longer jiffies based. Uses the same clocksource
 *   as gtod
 *
 * Rajeshwarr/Vineetg: Mar 2008
 *  -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
 *   for arch independent gettimeofday()
 *  -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
 *
 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
 */

/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
 * Each can be programmed to go from @count to @limit and optionally
 * interrupt when that happens.
 * A write to Control Register clears the Interrupt
 *
 * We've designated TIMER0 for events (clockevents)
 * while TIMER1 for free running (clocksource)
 *
 * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
 * which however is currently broken
 */

#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/arcregs.h>

#include <asm/mcip.h>

/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */

#define TIMER_CTRL_IE	        (1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH	        (1 << 1) /* Count only when CPU NOT halted */
56#define ARC_TIMER_MAX 0xFFFFFFFF 57 58static unsigned long arc_timer_freq; 59 60static int noinline arc_get_timer_clk(struct device_node *node) 61{ 62 struct clk *clk; 63 int ret; 64 65 clk = of_clk_get(node, 0); 66 if (IS_ERR(clk)) { 67 pr_err("timer missing clk"); 68 return PTR_ERR(clk); 69 } 70 71 ret = clk_prepare_enable(clk); 72 if (ret) { 73 pr_err("Couldn't enable parent clk\n"); 74 return ret; 75 } 76 77 arc_timer_freq = clk_get_rate(clk); 78 79 return 0; 80} 81 82/********** Clock Source Device *********/ 83 84#ifdef CONFIG_ARC_HAS_GFRC 85 86static cycle_t arc_read_gfrc(struct clocksource *cs) 87{ 88 unsigned long flags; 89 union { 90#ifdef CONFIG_CPU_BIG_ENDIAN 91 struct { u32 h, l; }; 92#else 93 struct { u32 l, h; }; 94#endif 95 cycle_t full; 96 } stamp; 97 98 local_irq_save(flags); 99 100 __mcip_cmd(CMD_GFRC_READ_LO, 0); 101 stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK); 102 103 __mcip_cmd(CMD_GFRC_READ_HI, 0); 104 stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK); 105 106 local_irq_restore(flags); 107 108 return stamp.full; 109} 110 111static struct clocksource arc_counter_gfrc = { 112 .name = "ARConnect GFRC", 113 .rating = 400, 114 .read = arc_read_gfrc, 115 .mask = CLOCKSOURCE_MASK(64), 116 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 117}; 118 119static void __init arc_cs_setup_gfrc(struct device_node *node) 120{ 121 int exists = cpuinfo_arc700[0].extn.gfrc; 122 int ret; 123 124 if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected")) 125 return; 126 127 ret = arc_get_timer_clk(node); 128 if (ret) 129 return; 130 131 clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); 132} 133CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); 134 135#endif 136 137#ifdef CONFIG_ARC_HAS_RTC 138 139#define AUX_RTC_CTRL 0x103 140#define AUX_RTC_LOW 0x104 141#define AUX_RTC_HIGH 0x105 142 143static cycle_t arc_read_rtc(struct clocksource *cs) 144{ 145 unsigned long status; 146 union { 147#ifdef CONFIG_CPU_BIG_ENDIAN 148 struct { u32 
high, low; }; 149#else 150 struct { u32 low, high; }; 151#endif 152 cycle_t full; 153 } stamp; 154 155 156 __asm__ __volatile( 157 "1: \n" 158 " lr %0, [AUX_RTC_LOW] \n" 159 " lr %1, [AUX_RTC_HIGH] \n" 160 " lr %2, [AUX_RTC_CTRL] \n" 161 " bbit0.nt %2, 31, 1b \n" 162 : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); 163 164 return stamp.full; 165} 166 167static struct clocksource arc_counter_rtc = { 168 .name = "ARCv2 RTC", 169 .rating = 350, 170 .read = arc_read_rtc, 171 .mask = CLOCKSOURCE_MASK(64), 172 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 173}; 174 175static void __init arc_cs_setup_rtc(struct device_node *node) 176{ 177 int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc; 178 int ret; 179 180 if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected")) 181 return; 182 183 /* Local to CPU hence not usable in SMP */ 184 if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP")) 185 return; 186 187 ret = arc_get_timer_clk(node); 188 if (ret) 189 return; 190 191 write_aux_reg(AUX_RTC_CTRL, 1); 192 193 clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); 194} 195CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc); 196 197#endif 198 199/* 200 * 32bit TIMER1 to keep counting monotonically and wraparound 201 */ 202 203static cycle_t arc_read_timer1(struct clocksource *cs) 204{ 205 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT); 206} 207 208static struct clocksource arc_counter_timer1 = { 209 .name = "ARC Timer1", 210 .rating = 300, 211 .read = arc_read_timer1, 212 .mask = CLOCKSOURCE_MASK(32), 213 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 214}; 215 216static void __init arc_cs_setup_timer1(struct device_node *node) 217{ 218 int ret; 219 220 /* Local to CPU hence not usable in SMP */ 221 if (IS_ENABLED(CONFIG_SMP)) 222 return; 223 224 ret = arc_get_timer_clk(node); 225 if (ret) 226 return; 227 228 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); 229 write_aux_reg(ARC_REG_TIMER1_CNT, 0); 230 
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); 231 232 clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); 233} 234 235/********** Clock Event Device *********/ 236 237static int arc_timer_irq; 238 239/* 240 * Arm the timer to interrupt after @cycles 241 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below 242 */ 243static void arc_timer_event_setup(unsigned int cycles) 244{ 245 write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles); 246 write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ 247 248 write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); 249} 250 251 252static int arc_clkevent_set_next_event(unsigned long delta, 253 struct clock_event_device *dev) 254{ 255 arc_timer_event_setup(delta); 256 return 0; 257} 258 259static int arc_clkevent_set_periodic(struct clock_event_device *dev) 260{ 261 /* 262 * At X Hz, 1 sec = 1000ms -> X cycles; 263 * 10ms -> X / 100 cycles 264 */ 265 arc_timer_event_setup(arc_timer_freq / HZ); 266 return 0; 267} 268 269static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { 270 .name = "ARC Timer0", 271 .features = CLOCK_EVT_FEAT_ONESHOT | 272 CLOCK_EVT_FEAT_PERIODIC, 273 .rating = 300, 274 .set_next_event = arc_clkevent_set_next_event, 275 .set_state_periodic = arc_clkevent_set_periodic, 276}; 277 278static irqreturn_t timer_irq_handler(int irq, void *dev_id) 279{ 280 /* 281 * Note that generic IRQ core could have passed @evt for @dev_id if 282 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() 283 */ 284 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 285 int irq_reenable = clockevent_state_periodic(evt); 286 287 /* 288 * Any write to CTRL reg ACks the interrupt, we rewrite the 289 * Count when [N]ot [H]alted bit. 
290 * And re-arm it if perioid by [I]nterrupt [E]nable bit 291 */ 292 write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); 293 294 evt->event_handler(evt); 295 296 return IRQ_HANDLED; 297} 298 299static int arc_timer_cpu_notify(struct notifier_block *self, 300 unsigned long action, void *hcpu) 301{ 302 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 303 304 evt->cpumask = cpumask_of(smp_processor_id()); 305 306 switch (action & ~CPU_TASKS_FROZEN) { 307 case CPU_STARTING: 308 clockevents_config_and_register(evt, arc_timer_freq, 309 0, ULONG_MAX); 310 enable_percpu_irq(arc_timer_irq, 0); 311 break; 312 case CPU_DYING: 313 disable_percpu_irq(arc_timer_irq); 314 break; 315 } 316 317 return NOTIFY_OK; 318} 319 320static struct notifier_block arc_timer_cpu_nb = { 321 .notifier_call = arc_timer_cpu_notify, 322}; 323 324/* 325 * clockevent setup for boot CPU 326 */ 327static void __init arc_clockevent_setup(struct device_node *node) 328{ 329 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 330 int ret; 331 332 register_cpu_notifier(&arc_timer_cpu_nb); 333 334 arc_timer_irq = irq_of_parse_and_map(node, 0); 335 if (arc_timer_irq <= 0) 336 panic("clockevent: missing irq"); 337 338 ret = arc_get_timer_clk(node); 339 if (ret) 340 panic("clockevent: missing clk"); 341 342 evt->irq = arc_timer_irq; 343 evt->cpumask = cpumask_of(smp_processor_id()); 344 clockevents_config_and_register(evt, arc_timer_freq, 345 0, ARC_TIMER_MAX); 346 347 /* Needs apriori irq_set_percpu_devid() done in intc map function */ 348 ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, 349 "Timer0 (per-cpu-tick)", evt); 350 if (ret) 351 panic("clockevent: unable to request irq\n"); 352 353 enable_percpu_irq(arc_timer_irq, 0); 354} 355 356static void __init arc_of_timer_init(struct device_node *np) 357{ 358 static int init_count = 0; 359 360 if (!init_count) { 361 init_count = 1; 362 arc_clockevent_setup(np); 363 } else { 364 
arc_cs_setup_timer1(np); 365 } 366} 367CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init); 368 369/* 370 * Called from start_kernel() - boot CPU only 371 */ 372void __init time_init(void) 373{ 374 of_clk_init(NULL); 375 clocksource_probe(); 376}