Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
"This update provides the following changes:

- The rework of the timer wheel which addresses the shortcomings of
the current wheel (cascading, slow search for next expiring timer,
etc). That's the first major change of the wheel in almost 20
years since Finn implemented it.

- A large overhaul of the clocksource drivers init functions to
consolidate the Device Tree initialization

- Some more Y2038 updates

- A capability fix for timerfd

- Yet another clock chip driver

- The usual pile of updates, comment improvements all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (130 commits)
tick/nohz: Optimize nohz idle enter
clockevents: Make clockevents_subsys static
clocksource/drivers/time-armada-370-xp: Fix return value check
timers: Implement optimization for same expiry time in mod_timer()
timers: Split out index calculation
timers: Only wake softirq if necessary
timers: Forward the wheel clock whenever possible
timers/nohz: Remove pointless tick_nohz_kick_tick() function
timers: Optimize collect_expired_timers() for NOHZ
timers: Move __run_timers() function
timers: Remove set_timer_slack() leftovers
timers: Switch to a non-cascading wheel
timers: Reduce the CPU index space to 256k
timers: Give a few structs and members proper names
hlist: Add hlist_is_singular_node() helper
signals: Use hrtimer for sigtimedwait()
timers: Remove the deprecated mod_timer_pinned() API
timers, net/ipv4/inet: Initialize connection request timers as pinned
timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned
timers, drivers/tty/metag_da: Initialize the poll timer as pinned
...

+2462 -1210
+17
Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
··· 1 + Oxford Semiconductor OXNAS SoCs Family RPS Timer 2 + ================================================ 3 + 4 + Required properties: 5 + - compatible: Should be "oxsemi,ox810se-rps-timer" 6 + - reg : Specifies base physical address and size of the registers. 7 + - interrupts : The interrupts of the two timers 8 + - clocks : The phandle of the timer clock source 9 + 10 + example: 11 + 12 + timer0: timer@200 { 13 + compatible = "oxsemi,ox810se-rps-timer"; 14 + reg = <0x200 0x40>; 15 + clocks = <&rpsclk>; 16 + interrupts = <4 5>; 17 + };
+4 -2
Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
··· 1 - Rockchip rk3288 timer 1 + Rockchip rk timer 2 2 3 3 Required properties: 4 - - compatible: shall be "rockchip,rk3288-timer" 4 + - compatible: shall be one of: 5 + "rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368 6 + "rockchip,rk3399-timer" - for rk3399 5 7 - reg: base address of the timer register starting with TIMERS CONTROL register 6 8 - interrupts: should contain the interrupts for Timer0 7 9 - clocks : must contain an entry for each entry in clock-names
+8
Documentation/kernel-parameters.txt
··· 687 687 [SPARC64] tick 688 688 [X86-64] hpet,tsc 689 689 690 + clocksource.arm_arch_timer.evtstrm= 691 + [ARM,ARM64] 692 + Format: <bool> 693 + Enable/disable the eventstream feature of the ARM 694 + architected timer so that code using WFE-based polling 695 + loops can be debugged more effectively on production 696 + systems. 697 + 690 698 clearcpuid=BITNUM [X86] 691 699 Disable CPUID feature X for the kernel. See 692 700 arch/x86/include/asm/cpufeatures.h for the valid bit
+39 -24
arch/arc/kernel/time.c
··· 116 116 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 117 117 }; 118 118 119 - static void __init arc_cs_setup_gfrc(struct device_node *node) 119 + static int __init arc_cs_setup_gfrc(struct device_node *node) 120 120 { 121 121 int exists = cpuinfo_arc700[0].extn.gfrc; 122 122 int ret; 123 123 124 124 if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected")) 125 - return; 125 + return -ENXIO; 126 126 127 127 ret = arc_get_timer_clk(node); 128 128 if (ret) 129 - return; 129 + return ret; 130 130 131 - clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); 131 + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); 132 132 } 133 133 CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); 134 134 ··· 172 172 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 173 173 }; 174 174 175 - static void __init arc_cs_setup_rtc(struct device_node *node) 175 + static int __init arc_cs_setup_rtc(struct device_node *node) 176 176 { 177 177 int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc; 178 178 int ret; 179 179 180 180 if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected")) 181 - return; 181 + return -ENXIO; 182 182 183 183 /* Local to CPU hence not usable in SMP */ 184 184 if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP")) 185 - return; 185 + return -EINVAL; 186 186 187 187 ret = arc_get_timer_clk(node); 188 188 if (ret) 189 - return; 189 + return ret; 190 190 191 191 write_aux_reg(AUX_RTC_CTRL, 1); 192 192 193 - clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); 193 + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); 194 194 } 195 195 CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc); 196 196 ··· 213 213 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 214 214 }; 215 215 216 - static void __init arc_cs_setup_timer1(struct device_node *node) 216 + static int __init arc_cs_setup_timer1(struct device_node *node) 217 217 { 218 218 int ret; 219 219 220 220 /* Local to CPU hence not 
usable in SMP */ 221 221 if (IS_ENABLED(CONFIG_SMP)) 222 - return; 222 + return -EINVAL; 223 223 224 224 ret = arc_get_timer_clk(node); 225 225 if (ret) 226 - return; 226 + return ret; 227 227 228 228 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); 229 229 write_aux_reg(ARC_REG_TIMER1_CNT, 0); 230 230 write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); 231 231 232 - clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); 232 + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); 233 233 } 234 234 235 235 /********** Clock Event Device *********/ ··· 324 324 /* 325 325 * clockevent setup for boot CPU 326 326 */ 327 - static void __init arc_clockevent_setup(struct device_node *node) 327 + static int __init arc_clockevent_setup(struct device_node *node) 328 328 { 329 329 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 330 330 int ret; 331 331 332 - register_cpu_notifier(&arc_timer_cpu_nb); 332 + ret = register_cpu_notifier(&arc_timer_cpu_nb); 333 + if (ret) { 334 + pr_err("Failed to register cpu notifier"); 335 + return ret; 336 + } 333 337 334 338 arc_timer_irq = irq_of_parse_and_map(node, 0); 335 - if (arc_timer_irq <= 0) 336 - panic("clockevent: missing irq"); 339 + if (arc_timer_irq <= 0) { 340 + pr_err("clockevent: missing irq"); 341 + return -EINVAL; 342 + } 337 343 338 344 ret = arc_get_timer_clk(node); 339 - if (ret) 340 - panic("clockevent: missing clk"); 345 + if (ret) { 346 + pr_err("clockevent: missing clk"); 347 + return ret; 348 + } 341 349 342 350 evt->irq = arc_timer_irq; 343 351 evt->cpumask = cpumask_of(smp_processor_id()); ··· 355 347 /* Needs apriori irq_set_percpu_devid() done in intc map function */ 356 348 ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, 357 349 "Timer0 (per-cpu-tick)", evt); 358 - if (ret) 359 - panic("clockevent: unable to request irq\n"); 350 + if (ret) { 351 + pr_err("clockevent: unable to request irq\n"); 352 + return ret; 353 + } 360 354 361 355 
enable_percpu_irq(arc_timer_irq, 0); 356 + 357 + return 0; 362 358 } 363 359 364 - static void __init arc_of_timer_init(struct device_node *np) 360 + static int __init arc_of_timer_init(struct device_node *np) 365 361 { 366 362 static int init_count = 0; 363 + int ret; 367 364 368 365 if (!init_count) { 369 366 init_count = 1; 370 - arc_clockevent_setup(np); 367 + ret = arc_clockevent_setup(np); 371 368 } else { 372 - arc_cs_setup_timer1(np); 369 + ret = arc_cs_setup_timer1(np); 373 370 } 371 + 372 + return ret; 374 373 } 375 374 CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init); 376 375
+1 -1
arch/arm/Kconfig
··· 358 358 bool "Cirrus Logic CLPS711x/EP721x/EP731x-based" 359 359 select ARCH_REQUIRE_GPIOLIB 360 360 select AUTO_ZRELADDR 361 - select CLKSRC_MMIO 362 361 select COMMON_CLK 363 362 select CPU_ARM720T 364 363 select GENERIC_CLOCKEVENTS 364 + select CLPS711X_TIMER 365 365 select MFD_SYSCON 366 366 select SOC_BUS 367 367 help
+2 -1
arch/arm/kernel/smp_twd.c
··· 390 390 } 391 391 392 392 #ifdef CONFIG_OF 393 - static void __init twd_local_timer_of_register(struct device_node *np) 393 + static int __init twd_local_timer_of_register(struct device_node *np) 394 394 { 395 395 int err; 396 396 ··· 410 410 411 411 out: 412 412 WARN(err, "twd_local_timer_of_register failed (%d)\n", err); 413 + return err; 413 414 } 414 415 CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register); 415 416 CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
+2
arch/arm/mach-bcm/Kconfig
··· 89 89 select HAVE_ARM_ARCH_TIMER 90 90 select PINCTRL 91 91 select ARCH_BCM_MOBILE_SMP if SMP 92 + select BCM_KONA_TIMER 92 93 help 93 94 This enables support for systems based on Broadcom mobile SoCs. 94 95 ··· 144 143 select ARM_TIMER_SP804 145 144 select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7 146 145 select CLKSRC_OF 146 + select BCM2835_TIMER 147 147 select PINCTRL 148 148 select PINCTRL_BCM2835 149 149 help
+1 -1
arch/arm/mach-integrator/Kconfig
··· 20 20 21 21 config ARCH_INTEGRATOR_AP 22 22 bool "Support Integrator/AP and Integrator/PP2 platforms" 23 - select CLKSRC_MMIO 23 + select INTEGRATOR_AP_TIMER 24 24 select MIGHT_HAVE_PCI 25 25 select SERIAL_AMBA_PL010 if TTY 26 26 select SERIAL_AMBA_PL010_CONSOLE if TTY
+1 -1
arch/arm/mach-keystone/Kconfig
··· 4 4 depends on ARM_PATCH_PHYS_VIRT 5 5 select ARM_GIC 6 6 select HAVE_ARM_ARCH_TIMER 7 - select CLKSRC_MMIO 7 + select KEYSTONE_TIMER 8 8 select ARM_ERRATA_798181 if SMP 9 9 select COMMON_CLK_KEYSTONE 10 10 select ARCH_SUPPORTS_BIG_ENDIAN
+1 -1
arch/arm/mach-moxart/Kconfig
··· 3 3 depends on ARCH_MULTI_V4 4 4 select CPU_FA526 5 5 select ARM_DMA_MEM_BUFFERABLE 6 - select CLKSRC_MMIO 6 + select MOXART_TIMER 7 7 select GENERIC_IRQ_CHIP 8 8 select ARCH_REQUIRE_GPIOLIB 9 9 select PHYLIB if NETDEVICES
+1 -1
arch/arm/mach-mxs/Kconfig
··· 16 16 bool "Freescale MXS (i.MX23, i.MX28) support" 17 17 depends on ARCH_MULTI_V5 18 18 select ARCH_REQUIRE_GPIOLIB 19 - select CLKSRC_MMIO 19 + select MXS_TIMER 20 20 select PINCTRL 21 21 select SOC_BUS 22 22 select SOC_IMX23
+1
arch/arm/mach-nspire/Kconfig
··· 7 7 select ARM_AMBA 8 8 select ARM_VIC 9 9 select ARM_TIMER_SP804 10 + select NSPIRE_TIMER 10 11 help 11 12 This enables support for systems using the TI-NSPIRE CPU
+2
arch/arm/mach-prima2/Kconfig
··· 28 28 default y 29 29 select ARM_GIC 30 30 select CPU_V7 31 + select ATLAS7_TIMER 31 32 select HAVE_ARM_SCU if SMP 32 33 select HAVE_SMP 33 34 help ··· 39 38 default y 40 39 select SIRF_IRQ 41 40 select ZONE_DMA 41 + select PRIMA2_TIMER 42 42 help 43 43 Support for CSR SiRFSoC ARM Cortex A9 Platform 44 44
+1 -1
arch/arm/mach-u300/Kconfig
··· 4 4 select ARCH_REQUIRE_GPIOLIB 5 5 select ARM_AMBA 6 6 select ARM_VIC 7 - select CLKSRC_MMIO 7 + select U300_TIMER 8 8 select CPU_ARM926T 9 9 select HAVE_TCM 10 10 select PINCTRL
+8
arch/arm64/boot/dts/rockchip/rk3399.dtsi
··· 492 492 interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; 493 493 }; 494 494 495 + rktimer: rktimer@ff850000 { 496 + compatible = "rockchip,rk3399-timer"; 497 + reg = <0x0 0xff850000 0x0 0x1000>; 498 + interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; 499 + clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>; 500 + clock-names = "pclk", "timer"; 501 + }; 502 + 495 503 spdif: spdif@ff870000 { 496 504 compatible = "rockchip,rk3399-spdif"; 497 505 reg = <0x0 0xff870000 0x0 0x1000>;
+37 -12
arch/microblaze/kernel/timer.c
··· 170 170 .dev_id = &clockevent_xilinx_timer, 171 171 }; 172 172 173 - static __init void xilinx_clockevent_init(void) 173 + static __init int xilinx_clockevent_init(void) 174 174 { 175 175 clockevent_xilinx_timer.mult = 176 176 div_sc(timer_clock_freq, NSEC_PER_SEC, ··· 181 181 clockevent_delta2ns(1, &clockevent_xilinx_timer); 182 182 clockevent_xilinx_timer.cpumask = cpumask_of(0); 183 183 clockevents_register_device(&clockevent_xilinx_timer); 184 + 185 + return 0; 184 186 } 185 187 186 188 static u64 xilinx_clock_read(void) ··· 231 229 232 230 static int __init xilinx_clocksource_init(void) 233 231 { 234 - if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) 235 - panic("failed to register clocksource"); 232 + int ret; 233 + 234 + ret = clocksource_register_hz(&clocksource_microblaze, 235 + timer_clock_freq); 236 + if (ret) { 237 + pr_err("failed to register clocksource"); 238 + return ret; 239 + } 236 240 237 241 /* stop timer1 */ 238 242 write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT, ··· 247 239 write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1); 248 240 249 241 /* register timecounter - for ftrace support */ 250 - init_xilinx_timecounter(); 251 - return 0; 242 + return init_xilinx_timecounter(); 252 243 } 253 244 254 - static void __init xilinx_timer_init(struct device_node *timer) 245 + static int __init xilinx_timer_init(struct device_node *timer) 255 246 { 256 247 struct clk *clk; 257 248 static int initialized; 258 249 u32 irq; 259 250 u32 timer_num = 1; 251 + int ret; 260 252 261 253 if (initialized) 262 254 return; ··· 266 258 timer_baseaddr = of_iomap(timer, 0); 267 259 if (!timer_baseaddr) { 268 260 pr_err("ERROR: invalid timer base address\n"); 269 - BUG(); 261 + return -ENXIO; 270 262 } 271 263 272 264 write_fn = timer_write32; ··· 279 271 } 280 272 281 273 irq = irq_of_parse_and_map(timer, 0); 274 + if (irq <= 0) { 275 + pr_err("Failed to parse and map irq"); 276 + return -EINVAL; 277 + } 282 278 283 279 
of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num); 284 280 if (timer_num) { 285 - pr_emerg("Please enable two timers in HW\n"); 286 - BUG(); 281 + pr_err("Please enable two timers in HW\n"); 282 + return -EINVAL; 287 283 } 288 284 289 285 pr_info("%s: irq=%d\n", timer->full_name, irq); ··· 309 297 310 298 freq_div_hz = timer_clock_freq / HZ; 311 299 312 - setup_irq(irq, &timer_irqaction); 300 + ret = setup_irq(irq, &timer_irqaction); 301 + if (ret) { 302 + pr_err("Failed to setup IRQ"); 303 + return ret; 304 + } 305 + 313 306 #ifdef CONFIG_HEART_BEAT 314 307 microblaze_setup_heartbeat(); 315 308 #endif 316 - xilinx_clocksource_init(); 317 - xilinx_clockevent_init(); 309 + 310 + ret = xilinx_clocksource_init(); 311 + if (ret) 312 + return ret; 313 + 314 + ret = xilinx_clockevent_init(); 315 + if (ret) 316 + return ret; 318 317 319 318 sched_clock_register(xilinx_clock_read, 32, timer_clock_freq); 319 + 320 + return 0; 320 321 } 321 322 322 323 CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
+12 -5
arch/mips/ralink/cevt-rt3352.c
··· 117 117 return 0; 118 118 } 119 119 120 - static void __init ralink_systick_init(struct device_node *np) 120 + static int __init ralink_systick_init(struct device_node *np) 121 121 { 122 + int ret; 123 + 122 124 systick.membase = of_iomap(np, 0); 123 125 if (!systick.membase) 124 - return; 126 + return -ENXIO; 125 127 126 128 systick_irqaction.name = np->name; 127 129 systick.dev.name = np->name; ··· 133 131 systick.dev.irq = irq_of_parse_and_map(np, 0); 134 132 if (!systick.dev.irq) { 135 133 pr_err("%s: request_irq failed", np->name); 136 - return; 134 + return -EINVAL; 137 135 } 138 136 139 - clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name, 140 - SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up); 137 + ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name, 138 + SYSTICK_FREQ, 301, 16, 139 + clocksource_mmio_readl_up); 140 + if (ret) 141 + return ret; 141 142 142 143 clockevents_register_device(&systick.dev); 143 144 144 145 pr_info("%s: running - mult: %d, shift: %d\n", 145 146 np->name, systick.dev.mult, systick.dev.shift); 147 + 148 + return 0; 146 149 } 147 150 148 151 CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
+44 -19
arch/nios2/kernel/time.c
··· 206 206 return IRQ_HANDLED; 207 207 } 208 208 209 - static void __init nios2_timer_get_base_and_freq(struct device_node *np, 209 + static int __init nios2_timer_get_base_and_freq(struct device_node *np, 210 210 void __iomem **base, u32 *freq) 211 211 { 212 212 *base = of_iomap(np, 0); 213 - if (!*base) 214 - panic("Unable to map reg for %s\n", np->name); 213 + if (!*base) { 214 + pr_crit("Unable to map reg for %s\n", np->name); 215 + return -ENXIO; 216 + } 215 217 216 - if (of_property_read_u32(np, "clock-frequency", freq)) 217 - panic("Unable to get %s clock frequency\n", np->name); 218 + if (of_property_read_u32(np, "clock-frequency", freq)) { 219 + pr_crit("Unable to get %s clock frequency\n", np->name); 220 + return -EINVAL; 221 + } 222 + 223 + return 0; 218 224 } 219 225 220 226 static struct nios2_clockevent_dev nios2_ce = { ··· 237 231 }, 238 232 }; 239 233 240 - static __init void nios2_clockevent_init(struct device_node *timer) 234 + static __init int nios2_clockevent_init(struct device_node *timer) 241 235 { 242 236 void __iomem *iobase; 243 237 u32 freq; 244 - int irq; 238 + int irq, ret; 245 239 246 - nios2_timer_get_base_and_freq(timer, &iobase, &freq); 240 + ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq); 241 + if (ret) 242 + return ret; 247 243 248 244 irq = irq_of_parse_and_map(timer, 0); 249 - if (!irq) 250 - panic("Unable to parse timer irq\n"); 245 + if (!irq) { 246 + pr_crit("Unable to parse timer irq\n"); 247 + return -EINVAL; 248 + } 251 249 252 250 nios2_ce.timer.base = iobase; 253 251 nios2_ce.timer.freq = freq; ··· 263 253 /* clear pending interrupt */ 264 254 timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG); 265 255 266 - if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name, 267 - &nios2_ce.ced)) 268 - panic("Unable to setup timer irq\n"); 256 + ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name, 257 + &nios2_ce.ced); 258 + if (ret) { 259 + pr_crit("Unable to setup timer irq\n"); 260 + return 
ret; 261 + } 269 262 270 263 clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX); 264 + 265 + return 0; 271 266 } 272 267 273 - static __init void nios2_clocksource_init(struct device_node *timer) 268 + static __init int nios2_clocksource_init(struct device_node *timer) 274 269 { 275 270 unsigned int ctrl; 276 271 void __iomem *iobase; 277 272 u32 freq; 273 + int ret; 278 274 279 - nios2_timer_get_base_and_freq(timer, &iobase, &freq); 275 + ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq); 276 + if (ret) 277 + return ret; 280 278 281 279 nios2_cs.timer.base = iobase; 282 280 nios2_cs.timer.freq = freq; 283 281 284 - clocksource_register_hz(&nios2_cs.cs, freq); 282 + ret = clocksource_register_hz(&nios2_cs.cs, freq); 283 + if (ret) 284 + return ret; 285 285 286 286 timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG); 287 287 timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG); ··· 302 282 303 283 /* Calibrate the delay loop directly */ 304 284 lpj_fine = freq / HZ; 285 + 286 + return 0; 305 287 } 306 288 307 289 /* ··· 311 289 * more instances, the second one gets used as clocksource and all 312 290 * others are unused. 313 291 */ 314 - static void __init nios2_time_init(struct device_node *timer) 292 + static int __init nios2_time_init(struct device_node *timer) 315 293 { 316 294 static int num_called; 295 + int ret; 317 296 318 297 switch (num_called) { 319 298 case 0: 320 - nios2_clockevent_init(timer); 299 + ret = nios2_clockevent_init(timer); 321 300 break; 322 301 case 1: 323 - nios2_clocksource_init(timer); 302 + ret = nios2_clocksource_init(timer); 324 303 break; 325 304 default: 326 305 break; 327 306 } 328 307 329 308 num_called++; 309 + 310 + return ret; 330 311 } 331 312 332 313 void read_persistent_clock(struct timespec *ts)
+2 -2
arch/x86/kernel/apic/x2apic_uv_x.c
··· 918 918 uv_set_scir_bits(bits); 919 919 920 920 /* enable next timer period */ 921 - mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); 921 + mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); 922 922 } 923 923 924 924 static void uv_heartbeat_enable(int cpu) ··· 927 927 struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; 928 928 929 929 uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); 930 - setup_timer(timer, uv_heartbeat, cpu); 930 + setup_pinned_timer(timer, uv_heartbeat, cpu); 931 931 timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; 932 932 add_timer_on(timer, cpu); 933 933 uv_cpu_scir_info(cpu)->enabled = 1;
+2 -2
arch/x86/kernel/cpu/mcheck/mce.c
··· 1309 1309 1310 1310 if (timer_pending(t)) { 1311 1311 if (time_before(when, t->expires)) 1312 - mod_timer_pinned(t, when); 1312 + mod_timer(t, when); 1313 1313 } else { 1314 1314 t->expires = round_jiffies(when); 1315 1315 add_timer_on(t, smp_processor_id()); ··· 1735 1735 struct timer_list *t = this_cpu_ptr(&mce_timer); 1736 1736 unsigned int cpu = smp_processor_id(); 1737 1737 1738 - setup_timer(t, mce_timer_fn, cpu); 1738 + setup_pinned_timer(t, mce_timer_fn, cpu); 1739 1739 mce_start_timer(cpu, t); 1740 1740 } 1741 1741
-5
block/genhd.c
··· 1523 1523 if (--ev->block) 1524 1524 goto out_unlock; 1525 1525 1526 - /* 1527 - * Not exactly a latency critical operation, set poll timer 1528 - * slack to 25% and kick event check. 1529 - */ 1530 1526 intv = disk_events_poll_jiffies(disk); 1531 - set_timer_slack(&ev->dwork.timer, intv / 4); 1532 1527 if (check_now) 1533 1528 queue_delayed_work(system_freezable_power_efficient_wq, 1534 1529 &ev->dwork, 0);
+105 -11
drivers/clocksource/Kconfig
··· 27 27 config CLKSRC_MMIO 28 28 bool 29 29 30 + config BCM2835_TIMER 31 + bool "BCM2835 timer driver" if COMPILE_TEST 32 + depends on GENERIC_CLOCKEVENTS 33 + select CLKSRC_MMIO 34 + help 35 + Enables the support for the BCM2835 timer driver. 36 + 37 + config BCM_KONA_TIMER 38 + bool "BCM mobile timer driver" if COMPILE_TEST 39 + depends on GENERIC_CLOCKEVENTS 40 + select CLKSRC_MMIO 41 + help 42 + Enables the support for the BCM Kona mobile timer driver. 43 + 30 44 config DIGICOLOR_TIMER 31 45 bool "Digicolor timer driver" if COMPILE_TEST 32 46 depends on GENERIC_CLOCKEVENTS ··· 155 141 help 156 142 Use the always on PRCMU Timer as clocksource 157 143 144 + config CLPS711X_TIMER 145 + bool "Cirrus logic timer driver" if COMPILE_TEST 146 + depends on GENERIC_CLOCKEVENTS 147 + select CLKSRC_MMIO 148 + help 149 + Enables support for the Cirrus Logic PS711 timer. 150 + 151 + config ATLAS7_TIMER 152 + bool "Atlas7 timer driver" if COMPILE_TEST 153 + depends on GENERIC_CLOCKEVENTS 154 + select CLKSRC_MMIO 155 + help 156 + Enables support for the Atlas7 timer. 157 + 158 + config MOXART_TIMER 159 + bool "Moxart timer driver" if COMPILE_TEST 160 + depends on GENERIC_CLOCKEVENTS 161 + select CLKSRC_MMIO 162 + help 163 + Enables support for the Moxart timer. 164 + 165 + config MXS_TIMER 166 + bool "Mxs timer driver" if COMPILE_TEST 167 + depends on GENERIC_CLOCKEVENTS 168 + select CLKSRC_MMIO 169 + select STMP_DEVICE 170 + help 171 + Enables support for the Mxs timer. 172 + 173 + config PRIMA2_TIMER 174 + bool "Prima2 timer driver" if COMPILE_TEST 175 + depends on GENERIC_CLOCKEVENTS 176 + select CLKSRC_MMIO 177 + help 178 + Enables support for the Prima2 timer. 179 + 180 + config U300_TIMER 181 + bool "U300 timer driver" if COMPILE_TEST 182 + depends on GENERIC_CLOCKEVENTS 183 + depends on ARM 184 + select CLKSRC_MMIO 185 + help 186 + Enables support for the U300 timer. 
187 + 188 + config NSPIRE_TIMER 189 + bool "NSpire timer driver" if COMPILE_TEST 190 + depends on GENERIC_CLOCKEVENTS 191 + select CLKSRC_MMIO 192 + help 193 + Enables support for the Nspire timer. 194 + 195 + config KEYSTONE_TIMER 196 + bool "Keystone timer driver" if COMPILE_TEST 197 + depends on GENERIC_CLOCKEVENTS 198 + depends on ARM || ARM64 199 + select CLKSRC_MMIO 200 + help 201 + Enables support for the Keystone timer. 202 + 203 + config INTEGRATOR_AP_TIMER 204 + bool "Integrator-ap timer driver" if COMPILE_TEST 205 + depends on GENERIC_CLOCKEVENTS 206 + select CLKSRC_MMIO 207 + help 208 + Enables support for the Integrator-ap timer. 209 + 158 210 config CLKSRC_DBX500_PRCMU_SCHED_CLOCK 159 211 bool "Clocksource PRCMU Timer sched_clock" 160 212 depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK) ··· 288 208 select CLKSRC_ACPI if ACPI 289 209 290 210 config ARM_ARCH_TIMER_EVTSTREAM 291 - bool "Support for ARM architected timer event stream generation" 211 + bool "Enable ARM architected timer event stream generation by default" 292 212 default y if ARM_ARCH_TIMER 293 213 depends on ARM_ARCH_TIMER 294 214 help 295 - This option enables support for event stream generation based on 296 - the ARM architected timer. It is used for waking up CPUs executing 297 - the wfe instruction at a frequency represented as a power-of-2 298 - divisor of the clock rate. 215 + This option enables support by default for event stream generation 216 + based on the ARM architected timer. It is used for waking up CPUs 217 + executing the wfe instruction at a frequency represented as a 218 + power-of-2 divisor of the clock rate. The behaviour can also be 219 + overridden on the command line using the 220 + clocksource.arm_arch_timer.evtstream parameter. 299 221 The main use of the event stream is wfe-based timeouts of userspace 300 222 locking implementations. 
It might also be useful for imposing timeout 301 223 on wfe to safeguard against any programming errors in case an expected ··· 306 224 hardware anomalies of missing events. 307 225 308 226 config ARM_GLOBAL_TIMER 309 - bool 227 + bool "Support for the ARM global timer" if COMPILE_TEST 310 228 select CLKSRC_OF if OF 229 + depends on ARM 311 230 help 312 231 This options enables support for the ARM global timer unit 313 232 ··· 326 243 Use ARM global timer clock source as sched_clock 327 244 328 245 config ARMV7M_SYSTICK 329 - bool 246 + bool "Support for the ARMv7M system time" if COMPILE_TEST 330 247 select CLKSRC_OF if OF 331 248 select CLKSRC_MMIO 332 249 help ··· 337 254 def_bool SOC_AT91SAM9 || SOC_SAMA5 338 255 339 256 config ATMEL_ST 340 - bool 257 + bool "Atmel ST timer support" if COMPILE_TEST 258 + depends on GENERIC_CLOCKEVENTS 341 259 select CLKSRC_OF 342 260 select MFD_SYSCON 261 + help 262 + Support for the Atmel ST timer. 343 263 344 264 config CLKSRC_METAG_GENERIC 345 265 def_bool y if METAG ··· 356 270 Support for Multi Core Timer controller on Exynos SoCs. 357 271 358 272 config CLKSRC_SAMSUNG_PWM 359 - bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST 273 + bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST 360 274 depends on GENERIC_CLOCKEVENTS 361 275 depends on HAS_IOMEM 362 276 help ··· 378 292 select CLKSRC_MMIO 379 293 help 380 294 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. 295 + 296 + config OXNAS_RPS_TIMER 297 + bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST 298 + depends on GENERIC_CLOCKEVENTS 299 + select CLKSRC_OF 300 + select CLKSRC_MMIO 301 + help 302 + This enables support for the Oxford Semiconductor OXNAS RPS timers. 381 303 382 304 config SYS_SUPPORTS_SH_CMT 383 305 bool ··· 455 361 Qualcomm SoCs. 
456 362 457 363 config CLKSRC_VERSATILE 458 - bool "ARM Versatile (Express) reference platforms clock source" 459 - depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET 364 + bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST 365 + depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET 460 366 select CLKSRC_OF 461 367 default y if MFD_VEXPRESS_SYSREG 462 368 help
+12 -11
drivers/clocksource/Makefile
··· 19 19 obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o 20 20 obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o 21 21 obj-$(CONFIG_ORION_TIMER) += time-orion.o 22 - obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o 23 - obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o 24 - obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o 25 - obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o 26 - obj-$(CONFIG_ARCH_MXS) += mxs_timer.o 22 + obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o 23 + obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o 24 + obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o 25 + obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o 26 + obj-$(CONFIG_MXS_TIMER) += mxs_timer.o 27 27 obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o 28 - obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o 29 - obj-$(CONFIG_ARCH_U300) += timer-u300.o 28 + obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o 29 + obj-$(CONFIG_U300_TIMER) += timer-u300.o 30 30 obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o 31 31 obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o 32 32 obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o 33 33 obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o 34 34 obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o 35 - obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o 36 - obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o 35 + obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o 36 + obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o 37 37 obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o 38 38 obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o 39 39 obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o ··· 48 48 obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o 49 49 obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o 50 50 obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o 51 + obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o 51 52 52 53 obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 53 54 obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o ··· 56 55 obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o 57 56 obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o 58 57 
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o 59 - obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o 60 - obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o 58 + obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o 59 + obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o 61 60 obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o 62 61 obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o 63 62 obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
+39 -17
drivers/clocksource/arm_arch_timer.c
··· 79 79 static bool arch_timer_c3stop; 80 80 static bool arch_timer_mem_use_virtual; 81 81 82 + static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); 83 + 84 + static int __init early_evtstrm_cfg(char *buf) 85 + { 86 + return strtobool(buf, &evtstrm_enable); 87 + } 88 + early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg); 89 + 82 90 /* 83 91 * Architected system timer support. 84 92 */ ··· 380 372 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); 381 373 382 374 arch_counter_set_user_access(); 383 - if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM)) 375 + if (evtstrm_enable) 384 376 arch_timer_configure_evtstream(); 385 377 386 378 return 0; ··· 701 693 return needs_probing; 702 694 } 703 695 704 - static void __init arch_timer_common_init(void) 696 + static int __init arch_timer_common_init(void) 705 697 { 706 698 unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; 707 699 708 700 /* Wait until both nodes are probed if we have two timers */ 709 701 if ((arch_timers_present & mask) != mask) { 710 702 if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match)) 711 - return; 703 + return 0; 712 704 if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match)) 713 - return; 705 + return 0; 714 706 } 715 707 716 708 arch_timer_banner(arch_timers_present); 717 709 arch_counter_register(arch_timers_present); 718 - arch_timer_arch_init(); 710 + return arch_timer_arch_init(); 719 711 } 720 712 721 - static void __init arch_timer_init(void) 713 + static int __init arch_timer_init(void) 722 714 { 715 + int ret; 723 716 /* 724 717 * If HYP mode is available, we know that the physical timer 725 718 * has been configured to be accessible from PL1. 
Use it, so ··· 748 739 749 740 if (!has_ppi) { 750 741 pr_warn("arch_timer: No interrupt available, giving up\n"); 751 - return; 742 + return -EINVAL; 752 743 } 753 744 } 754 745 755 - arch_timer_register(); 756 - arch_timer_common_init(); 746 + ret = arch_timer_register(); 747 + if (ret) 748 + return ret; 749 + 750 + ret = arch_timer_common_init(); 751 + if (ret) 752 + return ret; 757 753 758 754 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI]; 755 + 756 + return 0; 759 757 } 760 758 761 - static void __init arch_timer_of_init(struct device_node *np) 759 + static int __init arch_timer_of_init(struct device_node *np) 762 760 { 763 761 int i; 764 762 765 763 if (arch_timers_present & ARCH_CP15_TIMER) { 766 764 pr_warn("arch_timer: multiple nodes in dt, skipping\n"); 767 - return; 765 + return 0; 768 766 } 769 767 770 768 arch_timers_present |= ARCH_CP15_TIMER; ··· 790 774 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) 791 775 arch_timer_uses_ppi = PHYS_SECURE_PPI; 792 776 793 - arch_timer_init(); 777 + return arch_timer_init(); 794 778 } 795 779 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); 796 780 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); 797 781 798 - static void __init arch_timer_mem_init(struct device_node *np) 782 + static int __init arch_timer_mem_init(struct device_node *np) 799 783 { 800 784 struct device_node *frame, *best_frame = NULL; 801 785 void __iomem *cntctlbase, *base; 802 - unsigned int irq; 786 + unsigned int irq, ret = -EINVAL; 803 787 u32 cnttidr; 804 788 805 789 arch_timers_present |= ARCH_MEM_TIMER; 806 790 cntctlbase = of_iomap(np, 0); 807 791 if (!cntctlbase) { 808 792 pr_err("arch_timer: Can't find CNTCTLBase\n"); 809 - return; 793 + return -ENXIO; 810 794 } 811 795 812 796 cnttidr = readl_relaxed(cntctlbase + CNTTIDR); ··· 846 830 best_frame = of_node_get(frame); 847 831 } 848 832 833 + ret= -ENXIO; 849 834 base = arch_counter_base = 
of_iomap(best_frame, 0); 850 835 if (!base) { 851 836 pr_err("arch_timer: Can't map frame's registers\n"); ··· 858 841 else 859 842 irq = irq_of_parse_and_map(best_frame, 0); 860 843 844 + ret = -EINVAL; 861 845 if (!irq) { 862 846 pr_err("arch_timer: Frame missing %s irq", 863 847 arch_timer_mem_use_virtual ? "virt" : "phys"); ··· 866 848 } 867 849 868 850 arch_timer_detect_rate(base, np); 869 - arch_timer_mem_register(base, irq); 870 - arch_timer_common_init(); 851 + ret = arch_timer_mem_register(base, irq); 852 + if (ret) 853 + goto out; 854 + 855 + return arch_timer_common_init(); 871 856 out: 872 857 iounmap(cntctlbase); 873 858 of_node_put(best_frame); 859 + return ret; 874 860 } 875 861 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 876 862 arch_timer_mem_init);
+17 -9
drivers/clocksource/arm_global_timer.c
··· 238 238 register_current_timer_delay(&gt_delay_timer); 239 239 } 240 240 241 - static void __init gt_clocksource_init(void) 241 + static int __init gt_clocksource_init(void) 242 242 { 243 243 writel(0, gt_base + GT_CONTROL); 244 244 writel(0, gt_base + GT_COUNTER0); ··· 249 249 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK 250 250 sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); 251 251 #endif 252 - clocksource_register_hz(&gt_clocksource, gt_clk_rate); 252 + return clocksource_register_hz(&gt_clocksource, gt_clk_rate); 253 253 } 254 254 255 255 static int gt_cpu_notify(struct notifier_block *self, unsigned long action, ··· 270 270 .notifier_call = gt_cpu_notify, 271 271 }; 272 272 273 - static void __init global_timer_of_register(struct device_node *np) 273 + static int __init global_timer_of_register(struct device_node *np) 274 274 { 275 275 struct clk *gt_clk; 276 276 int err = 0; ··· 283 283 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 284 284 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 285 285 pr_warn("global-timer: non support for this cpu version.\n"); 286 - return; 286 + return -ENOSYS; 287 287 } 288 288 289 289 gt_ppi = irq_of_parse_and_map(np, 0); 290 290 if (!gt_ppi) { 291 291 pr_warn("global-timer: unable to parse irq\n"); 292 - return; 292 + return -EINVAL; 293 293 } 294 294 295 295 gt_base = of_iomap(np, 0); 296 296 if (!gt_base) { 297 297 pr_warn("global-timer: invalid base address\n"); 298 - return; 298 + return -ENXIO; 299 299 } 300 300 301 301 gt_clk = of_clk_get(np, 0); ··· 332 332 } 333 333 334 334 /* Immediately configure the timer on the boot CPU */ 335 - gt_clocksource_init(); 336 - gt_clockevents_init(this_cpu_ptr(gt_evt)); 335 + err = gt_clocksource_init(); 336 + if (err) 337 + goto out_irq; 338 + 339 + err = gt_clockevents_init(this_cpu_ptr(gt_evt)); 340 + if (err) 341 + goto out_irq; 342 + 337 343 gt_delay_timer_init(); 338 344 339 - return; 345 + return 0; 340 346 341 347 out_irq: 342 348 free_percpu_irq(gt_ppi, 
gt_evt); ··· 353 347 out_unmap: 354 348 iounmap(gt_base); 355 349 WARN(err, "ARM Global timer register failed (%d)\n", err); 350 + 351 + return err; 356 352 } 357 353 358 354 /* Only tested on r2p2 and r3p0 */
+12 -5
drivers/clocksource/armv7m_systick.c
··· 7 7 #include <linux/kernel.h> 8 8 #include <linux/clocksource.h> 9 9 #include <linux/clockchips.h> 10 + #include <linux/io.h> 10 11 #include <linux/of.h> 11 12 #include <linux/of_address.h> 12 13 #include <linux/clk.h> ··· 22 21 23 22 #define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF 24 23 25 - static void __init system_timer_of_register(struct device_node *np) 24 + static int __init system_timer_of_register(struct device_node *np) 26 25 { 27 26 struct clk *clk = NULL; 28 27 void __iomem *base; ··· 32 31 base = of_iomap(np, 0); 33 32 if (!base) { 34 33 pr_warn("system-timer: invalid base address\n"); 35 - return; 34 + return -ENXIO; 36 35 } 37 36 38 37 ret = of_property_read_u32(np, "clock-frequency", &rate); 39 38 if (ret) { 40 39 clk = of_clk_get(np, 0); 41 - if (IS_ERR(clk)) 40 + if (IS_ERR(clk)) { 41 + ret = PTR_ERR(clk); 42 42 goto out_unmap; 43 + } 43 44 44 45 ret = clk_prepare_enable(clk); 45 46 if (ret) 46 47 goto out_clk_put; 47 48 48 49 rate = clk_get_rate(clk); 49 - if (!rate) 50 + if (!rate) { 51 + ret = -EINVAL; 50 52 goto out_clk_disable; 53 + } 51 54 } 52 55 53 56 writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR); ··· 69 64 70 65 pr_info("ARM System timer initialized as clocksource\n"); 71 66 72 - return; 67 + return 0; 73 68 74 69 out_clk_disable: 75 70 clk_disable_unprepare(clk); ··· 78 73 out_unmap: 79 74 iounmap(base); 80 75 pr_warn("ARM System timer register failed (%d)\n", ret); 76 + 77 + return ret; 81 78 } 82 79 83 80 CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
+15 -7
drivers/clocksource/asm9260_timer.c
··· 184 184 * Timer initialization 185 185 * --------------------------------------------------------------------------- 186 186 */ 187 - static void __init asm9260_timer_init(struct device_node *np) 187 + static int __init asm9260_timer_init(struct device_node *np) 188 188 { 189 189 int irq; 190 190 struct clk *clk; ··· 192 192 unsigned long rate; 193 193 194 194 priv.base = of_io_request_and_map(np, 0, np->name); 195 - if (IS_ERR(priv.base)) 196 - panic("%s: unable to map resource", np->name); 195 + if (IS_ERR(priv.base)) { 196 + pr_err("%s: unable to map resource", np->name); 197 + return PTR_ERR(priv.base); 198 + } 197 199 198 200 clk = of_clk_get(np, 0); 199 201 200 202 ret = clk_prepare_enable(clk); 201 - if (ret) 202 - panic("Failed to enable clk!\n"); 203 + if (ret) { 204 + pr_err("Failed to enable clk!\n"); 205 + return ret; 206 + } 203 207 204 208 irq = irq_of_parse_and_map(np, 0); 205 209 ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER, 206 210 DRIVER_NAME, &event_dev); 207 - if (ret) 208 - panic("Failed to setup irq!\n"); 211 + if (ret) { 212 + pr_err("Failed to setup irq!\n"); 213 + return ret; 214 + } 209 215 210 216 /* set all timers for count-up */ 211 217 writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR); ··· 235 229 priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); 236 230 event_dev.cpumask = cpumask_of(0); 237 231 clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe); 232 + 233 + return 0; 238 234 } 239 235 CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer", 240 236 asm9260_timer_init);
+26 -12
drivers/clocksource/bcm2835_timer.c
··· 80 80 } 81 81 } 82 82 83 - static void __init bcm2835_timer_init(struct device_node *node) 83 + static int __init bcm2835_timer_init(struct device_node *node) 84 84 { 85 85 void __iomem *base; 86 86 u32 freq; 87 - int irq; 87 + int irq, ret; 88 88 struct bcm2835_timer *timer; 89 89 90 90 base = of_iomap(node, 0); 91 - if (!base) 92 - panic("Can't remap registers"); 91 + if (!base) { 92 + pr_err("Can't remap registers"); 93 + return -ENXIO; 94 + } 93 95 94 - if (of_property_read_u32(node, "clock-frequency", &freq)) 95 - panic("Can't read clock-frequency"); 96 + ret = of_property_read_u32(node, "clock-frequency", &freq); 97 + if (ret) { 98 + pr_err("Can't read clock-frequency"); 99 + return ret; 100 + } 96 101 97 102 system_clock = base + REG_COUNTER_LO; 98 103 sched_clock_register(bcm2835_sched_read, 32, freq); ··· 106 101 freq, 300, 32, clocksource_mmio_readl_up); 107 102 108 103 irq = irq_of_parse_and_map(node, DEFAULT_TIMER); 109 - if (irq <= 0) 110 - panic("Can't parse IRQ"); 104 + if (irq <= 0) { 105 + pr_err("Can't parse IRQ"); 106 + return -EINVAL; 107 + } 111 108 112 109 timer = kzalloc(sizeof(*timer), GFP_KERNEL); 113 - if (!timer) 114 - panic("Can't allocate timer struct\n"); 110 + if (!timer) { 111 + pr_err("Can't allocate timer struct\n"); 112 + return -ENOMEM; 113 + } 115 114 116 115 timer->control = base + REG_CONTROL; 117 116 timer->compare = base + REG_COMPARE(DEFAULT_TIMER); ··· 130 121 timer->act.dev_id = timer; 131 122 timer->act.handler = bcm2835_time_interrupt; 132 123 133 - if (setup_irq(irq, &timer->act)) 134 - panic("Can't set up timer IRQ\n"); 124 + ret = setup_irq(irq, &timer->act); 125 + if (ret) { 126 + pr_err("Can't set up timer IRQ\n"); 127 + return ret; 128 + } 135 129 136 130 clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); 137 131 138 132 pr_info("bcm2835: system timer (irq = %d)\n", irq); 133 + 134 + return 0; 139 135 } 140 136 CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", 141 137 
bcm2835_timer_init);
+4 -8
drivers/clocksource/bcm_kona_timer.c
··· 20 20 #include <linux/clk.h> 21 21 22 22 #include <linux/io.h> 23 - #include <asm/mach/time.h> 24 23 25 24 #include <linux/of.h> 26 25 #include <linux/of_address.h> ··· 162 163 .handler = kona_timer_interrupt, 163 164 }; 164 165 165 - static void __init kona_timer_init(struct device_node *node) 166 + static int __init kona_timer_init(struct device_node *node) 166 167 { 167 168 u32 freq; 168 169 struct clk *external_clk; 169 - 170 - if (!of_device_is_available(node)) { 171 - pr_info("Kona Timer v1 marked as disabled in device tree\n"); 172 - return; 173 - } 174 170 175 171 external_clk = of_clk_get_by_name(node, NULL); 176 172 ··· 176 182 arch_timer_rate = freq; 177 183 } else { 178 184 pr_err("Kona Timer v1 unable to determine clock-frequency"); 179 - return; 185 + return -EINVAL; 180 186 } 181 187 182 188 /* Setup IRQ numbers */ ··· 190 196 kona_timer_clockevents_init(); 191 197 setup_irq(timers.tmr_irq, &kona_timer_irq); 192 198 kona_timer_set_next_event((arch_timer_rate / HZ), NULL); 199 + 200 + return 0; 193 201 } 194 202 195 203 CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
+46 -28
drivers/clocksource/cadence_ttc_timer.c
··· 322 322 return NOTIFY_DONE; 323 323 } 324 324 325 - static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, 325 + static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, 326 326 u32 timer_width) 327 327 { 328 328 struct ttc_timer_clocksource *ttccs; 329 329 int err; 330 330 331 331 ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL); 332 - if (WARN_ON(!ttccs)) 333 - return; 332 + if (!ttccs) 333 + return -ENOMEM; 334 334 335 335 ttccs->ttc.clk = clk; 336 336 337 337 err = clk_prepare_enable(ttccs->ttc.clk); 338 - if (WARN_ON(err)) { 338 + if (err) { 339 339 kfree(ttccs); 340 - return; 340 + return err; 341 341 } 342 342 343 343 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); ··· 345 345 ttccs->ttc.clk_rate_change_nb.notifier_call = 346 346 ttc_rate_change_clocksource_cb; 347 347 ttccs->ttc.clk_rate_change_nb.next = NULL; 348 - if (clk_notifier_register(ttccs->ttc.clk, 349 - &ttccs->ttc.clk_rate_change_nb)) 348 + 349 + err = clk_notifier_register(ttccs->ttc.clk, 350 + &ttccs->ttc.clk_rate_change_nb); 351 + if (err) 350 352 pr_warn("Unable to register clock notifier.\n"); 351 353 352 354 ttccs->ttc.base_addr = base; ··· 370 368 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); 371 369 372 370 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); 373 - if (WARN_ON(err)) { 371 + if (err) { 374 372 kfree(ttccs); 375 - return; 373 + return err; 376 374 } 377 375 378 376 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; 379 377 sched_clock_register(ttc_sched_clock_read, timer_width, 380 378 ttccs->ttc.freq / PRESCALE); 379 + 380 + return 0; 381 381 } 382 382 383 383 static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, ··· 405 401 } 406 402 } 407 403 408 - static void __init ttc_setup_clockevent(struct clk *clk, 409 - void __iomem *base, u32 irq) 404 + static int __init ttc_setup_clockevent(struct clk *clk, 405 + void __iomem *base, u32 irq) 410 406 { 411 407 struct ttc_timer_clockevent *ttcce; 412 408 
int err; 413 409 414 410 ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL); 415 - if (WARN_ON(!ttcce)) 416 - return; 411 + if (!ttcce) 412 + return -ENOMEM; 417 413 418 414 ttcce->ttc.clk = clk; 419 415 420 416 err = clk_prepare_enable(ttcce->ttc.clk); 421 - if (WARN_ON(err)) { 417 + if (err) { 422 418 kfree(ttcce); 423 - return; 419 + return err; 424 420 } 425 421 426 422 ttcce->ttc.clk_rate_change_nb.notifier_call = 427 423 ttc_rate_change_clockevent_cb; 428 424 ttcce->ttc.clk_rate_change_nb.next = NULL; 429 - if (clk_notifier_register(ttcce->ttc.clk, 430 - &ttcce->ttc.clk_rate_change_nb)) 425 + 426 + err = clk_notifier_register(ttcce->ttc.clk, 427 + &ttcce->ttc.clk_rate_change_nb); 428 + if (err) { 431 429 pr_warn("Unable to register clock notifier.\n"); 430 + return err; 431 + } 432 + 432 433 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); 433 434 434 435 ttcce->ttc.base_addr = base; ··· 460 451 461 452 err = request_irq(irq, ttc_clock_event_interrupt, 462 453 IRQF_TIMER, ttcce->ce.name, ttcce); 463 - if (WARN_ON(err)) { 454 + if (err) { 464 455 kfree(ttcce); 465 - return; 456 + return err; 466 457 } 467 458 468 459 clockevents_config_and_register(&ttcce->ce, 469 460 ttcce->ttc.freq / PRESCALE, 1, 0xfffe); 461 + 462 + return 0; 470 463 } 471 464 472 465 /** ··· 477 466 * Initializes the timer hardware and register the clock source and clock event 478 467 * timers with Linux kernal timer framework 479 468 */ 480 - static void __init ttc_timer_init(struct device_node *timer) 469 + static int __init ttc_timer_init(struct device_node *timer) 481 470 { 482 471 unsigned int irq; 483 472 void __iomem *timer_baseaddr; 484 473 struct clk *clk_cs, *clk_ce; 485 474 static int initialized; 486 - int clksel; 475 + int clksel, ret; 487 476 u32 timer_width = 16; 488 477 489 478 if (initialized) 490 - return; 479 + return 0; 491 480 492 481 initialized = 1; 493 482 ··· 499 488 timer_baseaddr = of_iomap(timer, 0); 500 489 if (!timer_baseaddr) { 501 490 pr_err("ERROR: invalid timer 
base address\n"); 502 - BUG(); 491 + return -ENXIO; 503 492 } 504 493 505 494 irq = irq_of_parse_and_map(timer, 1); 506 495 if (irq <= 0) { 507 496 pr_err("ERROR: invalid interrupt number\n"); 508 - BUG(); 497 + return -EINVAL; 509 498 } 510 499 511 500 of_property_read_u32(timer, "timer-width", &timer_width); ··· 515 504 clk_cs = of_clk_get(timer, clksel); 516 505 if (IS_ERR(clk_cs)) { 517 506 pr_err("ERROR: timer input clock not found\n"); 518 - BUG(); 507 + return PTR_ERR(clk_cs); 519 508 } 520 509 521 510 clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); ··· 523 512 clk_ce = of_clk_get(timer, clksel); 524 513 if (IS_ERR(clk_ce)) { 525 514 pr_err("ERROR: timer input clock not found\n"); 526 - BUG(); 515 + return PTR_ERR(clk_ce); 527 516 } 528 517 529 - ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); 530 - ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); 518 + ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); 519 + if (ret) 520 + return ret; 521 + 522 + ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); 523 + if (ret) 524 + return ret; 531 525 532 526 pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); 527 + 528 + return 0; 533 529 } 534 530 535 531 CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+2 -2
drivers/clocksource/clksrc-dbx500-prcmu.c
··· 64 64 65 65 #endif 66 66 67 - static void __init clksrc_dbx500_prcmu_init(struct device_node *node) 67 + static int __init clksrc_dbx500_prcmu_init(struct device_node *node) 68 68 { 69 69 clksrc_dbx500_timer_base = of_iomap(node, 0); 70 70 ··· 84 84 #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK 85 85 sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K); 86 86 #endif 87 - clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); 87 + return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); 88 88 } 89 89 CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4", 90 90 clksrc_dbx500_prcmu_init);
+11 -3
drivers/clocksource/clksrc-probe.c
··· 28 28 { 29 29 struct device_node *np; 30 30 const struct of_device_id *match; 31 - of_init_fn_1 init_func; 31 + of_init_fn_1_ret init_func_ret; 32 32 unsigned clocksources = 0; 33 + int ret; 33 34 34 35 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 35 36 if (!of_device_is_available(np)) 36 37 continue; 37 38 38 - init_func = match->data; 39 - init_func(np); 39 + init_func_ret = match->data; 40 + 41 + ret = init_func_ret(np); 42 + if (ret) { 43 + pr_err("Failed to initialize '%s': %d", 44 + of_node_full_name(np), ret); 45 + continue; 46 + } 47 + 40 48 clocksources++; 41 49 } 42 50
+12 -8
drivers/clocksource/clksrc_st_lpc.c
··· 92 92 return 0; 93 93 } 94 94 95 - static void __init st_clksrc_of_register(struct device_node *np) 95 + static int __init st_clksrc_of_register(struct device_node *np) 96 96 { 97 97 int ret; 98 98 uint32_t mode; ··· 100 100 ret = of_property_read_u32(np, "st,lpc-mode", &mode); 101 101 if (ret) { 102 102 pr_err("clksrc-st-lpc: An LPC mode must be provided\n"); 103 - return; 103 + return ret; 104 104 } 105 105 106 106 /* LPC can either run as a Clocksource or in RTC or WDT mode */ 107 107 if (mode != ST_LPC_MODE_CLKSRC) 108 - return; 108 + return 0; 109 109 110 110 ddata.base = of_iomap(np, 0); 111 111 if (!ddata.base) { 112 112 pr_err("clksrc-st-lpc: Unable to map iomem\n"); 113 - return; 113 + return -ENXIO; 114 114 } 115 115 116 - if (st_clksrc_setup_clk(np)) { 116 + ret = st_clksrc_setup_clk(np); 117 + if (ret) { 117 118 iounmap(ddata.base); 118 - return; 119 + return ret; 119 120 } 120 121 121 - if (st_clksrc_init()) { 122 + ret = st_clksrc_init(); 123 + if (ret) { 122 124 clk_disable_unprepare(ddata.clk); 123 125 clk_put(ddata.clk); 124 126 iounmap(ddata.base); 125 - return; 127 + return ret; 126 128 } 127 129 128 130 pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n", 129 131 clk_get_rate(ddata.clk)); 132 + 133 + return ret; 130 134 } 131 135 CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
+4 -6
drivers/clocksource/clps711x-timer.c
··· 104 104 } 105 105 106 106 #ifdef CONFIG_CLKSRC_OF 107 - static void __init clps711x_timer_init(struct device_node *np) 107 + static int __init clps711x_timer_init(struct device_node *np) 108 108 { 109 109 unsigned int irq = irq_of_parse_and_map(np, 0); 110 110 struct clk *clock = of_clk_get(np, 0); ··· 112 112 113 113 switch (of_alias_get_id(np, "timer")) { 114 114 case CLPS711X_CLKSRC_CLOCKSOURCE: 115 - BUG_ON(_clps711x_clksrc_init(clock, base)); 116 - break; 115 + return _clps711x_clksrc_init(clock, base); 117 116 case CLPS711X_CLKSRC_CLOCKEVENT: 118 - BUG_ON(_clps711x_clkevt_init(clock, base, irq)); 119 - break; 117 + return _clps711x_clkevt_init(clock, base, irq); 120 118 default: 121 - break; 119 + return -EINVAL; 122 120 } 123 121 } 124 122 CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+3 -1
drivers/clocksource/dw_apb_timer_of.c
··· 143 143 #endif 144 144 145 145 static int num_called; 146 - static void __init dw_apb_timer_init(struct device_node *timer) 146 + static int __init dw_apb_timer_init(struct device_node *timer) 147 147 { 148 148 switch (num_called) { 149 149 case 0: ··· 164 164 } 165 165 166 166 num_called++; 167 + 168 + return 0; 167 169 } 168 170 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); 169 171 CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+22 -10
drivers/clocksource/exynos_mct.c
··· 232 232 return exynos4_read_count_32(); 233 233 } 234 234 235 - static void __init exynos4_clocksource_init(void) 235 + static int __init exynos4_clocksource_init(void) 236 236 { 237 237 exynos4_mct_frc_start(); 238 238 ··· 244 244 panic("%s: can't register clocksource\n", mct_frc.name); 245 245 246 246 sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); 247 + 248 + return 0; 247 249 } 248 250 249 251 static void exynos4_mct_comp0_stop(void) ··· 337 335 .dev_id = &mct_comp_device, 338 336 }; 339 337 340 - static void exynos4_clockevent_init(void) 338 + static int exynos4_clockevent_init(void) 341 339 { 342 340 mct_comp_device.cpumask = cpumask_of(0); 343 341 clockevents_config_and_register(&mct_comp_device, clk_rate, 344 342 0xf, 0xffffffff); 345 343 setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); 344 + 345 + return 0; 346 346 } 347 347 348 348 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); ··· 520 516 .notifier_call = exynos4_mct_cpu_notify, 521 517 }; 522 518 523 - static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) 519 + static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) 524 520 { 525 521 int err, cpu; 526 522 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); ··· 576 572 577 573 /* Immediately configure the timer on the boot CPU */ 578 574 exynos4_local_timer_setup(mevt); 579 - return; 575 + return 0; 580 576 581 577 out_irq: 582 578 free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); 579 + return err; 583 580 } 584 581 585 - static void __init mct_init_dt(struct device_node *np, unsigned int int_type) 582 + static int __init mct_init_dt(struct device_node *np, unsigned int int_type) 586 583 { 587 584 u32 nr_irqs, i; 585 + int ret; 588 586 589 587 mct_int_type = int_type; 590 588 ··· 606 600 for (i = MCT_L0_IRQ; i < nr_irqs; i++) 607 601 mct_irqs[i] = irq_of_parse_and_map(np, i); 608 602 609 - exynos4_timer_resources(np, 
of_iomap(np, 0)); 610 - exynos4_clocksource_init(); 611 - exynos4_clockevent_init(); 603 + ret = exynos4_timer_resources(np, of_iomap(np, 0)); 604 + if (ret) 605 + return ret; 606 + 607 + ret = exynos4_clocksource_init(); 608 + if (ret) 609 + return ret; 610 + 611 + return exynos4_clockevent_init(); 612 612 } 613 613 614 614 615 - static void __init mct_init_spi(struct device_node *np) 615 + static int __init mct_init_spi(struct device_node *np) 616 616 { 617 617 return mct_init_dt(np, MCT_INT_SPI); 618 618 } 619 619 620 - static void __init mct_init_ppi(struct device_node *np) 620 + static int __init mct_init_ppi(struct device_node *np) 621 621 { 622 622 return mct_init_dt(np, MCT_INT_PPI); 623 623 }
+13 -7
drivers/clocksource/fsl_ftm_timer.c
··· 316 316 return 0; 317 317 } 318 318 319 - static void __init ftm_timer_init(struct device_node *np) 319 + static int __init ftm_timer_init(struct device_node *np) 320 320 { 321 321 unsigned long freq; 322 - int irq; 322 + int ret, irq; 323 323 324 324 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 325 325 if (!priv) 326 - return; 326 + return -ENOMEM; 327 327 328 + ret = -ENXIO; 328 329 priv->clkevt_base = of_iomap(np, 0); 329 330 if (!priv->clkevt_base) { 330 331 pr_err("ftm: unable to map event timer registers\n"); ··· 338 337 goto err; 339 338 } 340 339 340 + ret = -EINVAL; 341 341 irq = irq_of_parse_and_map(np, 0); 342 342 if (irq <= 0) { 343 343 pr_err("ftm: unable to get IRQ from DT, %d\n", irq); ··· 351 349 if (!freq) 352 350 goto err; 353 351 354 - if (ftm_calc_closest_round_cyc(freq)) 352 + ret = ftm_calc_closest_round_cyc(freq); 353 + if (ret) 355 354 goto err; 356 355 357 - if (ftm_clocksource_init(freq)) 356 + ret = ftm_clocksource_init(freq); 357 + if (ret) 358 358 goto err; 359 359 360 - if (ftm_clockevent_init(freq, irq)) 360 + ret = ftm_clockevent_init(freq, irq); 361 + if (ret) 361 362 goto err; 362 363 363 - return; 364 + return 0; 364 365 365 366 err: 366 367 kfree(priv); 368 + return ret; 367 369 } 368 370 CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
+8 -4
drivers/clocksource/h8300_timer16.c
··· 126 126 #define REG_CH 0 127 127 #define REG_COMM 1 128 128 129 - static void __init h8300_16timer_init(struct device_node *node) 129 + static int __init h8300_16timer_init(struct device_node *node) 130 130 { 131 131 void __iomem *base[2]; 132 132 int ret, irq; ··· 136 136 clk = of_clk_get(node, 0); 137 137 if (IS_ERR(clk)) { 138 138 pr_err("failed to get clock for clocksource\n"); 139 - return; 139 + return PTR_ERR(clk); 140 140 } 141 141 142 + ret = -ENXIO; 142 143 base[REG_CH] = of_iomap(node, 0); 143 144 if (!base[REG_CH]) { 144 145 pr_err("failed to map registers for clocksource\n"); ··· 152 151 goto unmap_ch; 153 152 } 154 153 154 + ret = -EINVAL; 155 155 irq = irq_of_parse_and_map(node, 0); 156 156 if (!irq) { 157 157 pr_err("failed to get irq for clockevent\n"); ··· 176 174 177 175 clocksource_register_hz(&timer16_priv.cs, 178 176 clk_get_rate(clk) / 8); 179 - return; 177 + return 0; 180 178 181 179 unmap_comm: 182 180 iounmap(base[REG_COMM]); ··· 184 182 iounmap(base[REG_CH]); 185 183 free_clk: 186 184 clk_put(clk); 185 + return ret; 187 186 } 188 187 189 - CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init); 188 + CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", 189 + h8300_16timer_init);
+7 -4
drivers/clocksource/h8300_timer8.c
··· 164 164 }, 165 165 }; 166 166 167 - static void __init h8300_8timer_init(struct device_node *node) 167 + static int __init h8300_8timer_init(struct device_node *node) 168 168 { 169 169 void __iomem *base; 170 - int irq; 170 + int irq, ret; 171 171 struct clk *clk; 172 172 173 173 clk = of_clk_get(node, 0); 174 174 if (IS_ERR(clk)) { 175 175 pr_err("failed to get clock for clockevent\n"); 176 - return; 176 + return PTR_ERR(clk); 177 177 } 178 178 179 + ret = ENXIO; 179 180 base = of_iomap(node, 0); 180 181 if (!base) { 181 182 pr_err("failed to map registers for clockevent\n"); 182 183 goto free_clk; 183 184 } 184 185 186 + ret = -EINVAL; 185 187 irq = irq_of_parse_and_map(node, 0); 186 188 if (!irq) { 187 189 pr_err("failed to get irq for clockevent\n"); ··· 207 205 clockevents_config_and_register(&timer8_priv.ced, 208 206 timer8_priv.rate, 1, 0x0000ffff); 209 207 210 - return; 208 + return 0; 211 209 unmap_reg: 212 210 iounmap(base); 213 211 free_clk: 214 212 clk_put(clk); 213 + return ret; 215 214 } 216 215 217 216 CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
+5 -5
drivers/clocksource/h8300_tpu.c
··· 119 119 #define CH_L 0 120 120 #define CH_H 1 121 121 122 - static void __init h8300_tpu_init(struct device_node *node) 122 + static int __init h8300_tpu_init(struct device_node *node) 123 123 { 124 124 void __iomem *base[2]; 125 125 struct clk *clk; 126 + int ret = -ENXIO; 126 127 127 128 clk = of_clk_get(node, 0); 128 129 if (IS_ERR(clk)) { 129 130 pr_err("failed to get clock for clocksource\n"); 130 - return; 131 + return PTR_ERR(clk); 131 132 } 132 133 133 134 base[CH_L] = of_iomap(node, CH_L); ··· 145 144 tpu_priv.mapbase1 = base[CH_L]; 146 145 tpu_priv.mapbase2 = base[CH_H]; 147 146 148 - clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); 149 - 150 - return; 147 + return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); 151 148 152 149 unmap_L: 153 150 iounmap(base[CH_H]); 154 151 free_clk: 155 152 clk_put(clk); 153 + return ret; 156 154 } 157 155 158 156 CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
+13 -6
drivers/clocksource/meson6_timer.c
··· 126 126 .dev_id = &meson6_clockevent, 127 127 }; 128 128 129 - static void __init meson6_timer_init(struct device_node *node) 129 + static int __init meson6_timer_init(struct device_node *node) 130 130 { 131 131 u32 val; 132 132 int ret, irq; 133 133 134 134 timer_base = of_io_request_and_map(node, 0, "meson6-timer"); 135 - if (IS_ERR(timer_base)) 136 - panic("Can't map registers"); 135 + if (IS_ERR(timer_base)) { 136 + pr_err("Can't map registers"); 137 + return -ENXIO; 138 + } 137 139 138 140 irq = irq_of_parse_and_map(node, 0); 139 - if (irq <= 0) 140 - panic("Can't parse IRQ"); 141 + if (irq <= 0) { 142 + pr_err("Can't parse IRQ"); 143 + return -EINVAL; 144 + } 141 145 142 146 /* Set 1us for timer E */ 143 147 val = readl(timer_base + TIMER_ISA_MUX); ··· 162 158 meson6_clkevt_time_stop(CED_ID); 163 159 164 160 ret = setup_irq(irq, &meson6_timer_irq); 165 - if (ret) 161 + if (ret) { 166 162 pr_warn("failed to setup irq %d\n", irq); 163 + return ret; 164 + } 167 165 168 166 meson6_clockevent.cpumask = cpu_possible_mask; 169 167 meson6_clockevent.irq = irq; 170 168 171 169 clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC, 172 170 1, 0xfffe); 171 + return 0; 173 172 } 174 173 CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer", 175 174 meson6_timer_init);
+16 -8
drivers/clocksource/mips-gic-timer.c
··· 146 146 .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC }, 147 147 }; 148 148 149 - static void __init __gic_clocksource_init(void) 149 + static int __init __gic_clocksource_init(void) 150 150 { 151 151 int ret; 152 152 ··· 159 159 ret = clocksource_register_hz(&gic_clocksource, gic_frequency); 160 160 if (ret < 0) 161 161 pr_warn("GIC: Unable to register clocksource\n"); 162 + 163 + return ret; 162 164 } 163 165 164 166 void __init gic_clocksource_init(unsigned int frequency) ··· 181 179 struct clk *clk; 182 180 int ret; 183 181 184 - if (WARN_ON(!gic_present || !node->parent || 185 - !of_device_is_compatible(node->parent, "mti,gic"))) 186 - return; 182 + if (!gic_present || !node->parent || 183 + !of_device_is_compatible(node->parent, "mti,gic")) { 184 + pr_warn("No DT definition for the mips gic driver"); 185 + return -ENXIO; 186 + } 187 187 188 188 clk = of_clk_get(node, 0); 189 189 if (!IS_ERR(clk)) { 190 190 if (clk_prepare_enable(clk) < 0) { 191 191 pr_err("GIC failed to enable clock\n"); 192 192 clk_put(clk); 193 - return; 193 + return PTR_ERR(clk); 194 194 } 195 195 196 196 gic_frequency = clk_get_rate(clk); 197 197 } else if (of_property_read_u32(node, "clock-frequency", 198 198 &gic_frequency)) { 199 199 pr_err("GIC frequency not specified.\n"); 200 - return; 200 + return -EINVAL;; 201 201 } 202 202 gic_timer_irq = irq_of_parse_and_map(node, 0); 203 203 if (!gic_timer_irq) { 204 204 pr_err("GIC timer IRQ not specified.\n"); 205 - return; 205 + return -EINVAL;; 206 206 } 207 207 208 - __gic_clocksource_init(); 208 + ret = __gic_clocksource_init(); 209 + if (ret) 210 + return ret; 209 211 210 212 ret = gic_clockevent_init(); 211 213 if (!ret && !IS_ERR(clk)) { ··· 219 213 220 214 /* And finally start the counter */ 221 215 gic_start_count(); 216 + 217 + return 0; 222 218 } 223 219 CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer", 224 220 gic_clocksource_of_init);
+26 -13
drivers/clocksource/moxart_timer.c
··· 119 119 .dev_id = &moxart_clockevent, 120 120 }; 121 121 122 - static void __init moxart_timer_init(struct device_node *node) 122 + static int __init moxart_timer_init(struct device_node *node) 123 123 { 124 124 int ret, irq; 125 125 unsigned long pclk; 126 126 struct clk *clk; 127 127 128 128 base = of_iomap(node, 0); 129 - if (!base) 130 - panic("%s: of_iomap failed\n", node->full_name); 129 + if (!base) { 130 + pr_err("%s: of_iomap failed\n", node->full_name); 131 + return -ENXIO; 132 + } 131 133 132 134 irq = irq_of_parse_and_map(node, 0); 133 - if (irq <= 0) 134 - panic("%s: irq_of_parse_and_map failed\n", node->full_name); 135 + if (irq <= 0) { 136 + pr_err("%s: irq_of_parse_and_map failed\n", node->full_name); 137 + return -EINVAL; 138 + } 135 139 136 140 ret = setup_irq(irq, &moxart_timer_irq); 137 - if (ret) 138 - panic("%s: setup_irq failed\n", node->full_name); 141 + if (ret) { 142 + pr_err("%s: setup_irq failed\n", node->full_name); 143 + return ret; 144 + } 139 145 140 146 clk = of_clk_get(node, 0); 141 - if (IS_ERR(clk)) 142 - panic("%s: of_clk_get failed\n", node->full_name); 147 + if (IS_ERR(clk)) { 148 + pr_err("%s: of_clk_get failed\n", node->full_name); 149 + return PTR_ERR(clk); 150 + } 143 151 144 152 pclk = clk_get_rate(clk); 145 153 146 - if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, 147 - "moxart_timer", pclk, 200, 32, 148 - clocksource_mmio_readl_down)) 149 - panic("%s: clocksource_mmio_init failed\n", node->full_name); 154 + ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, 155 + "moxart_timer", pclk, 200, 32, 156 + clocksource_mmio_readl_down); 157 + if (ret) { 158 + pr_err("%s: clocksource_mmio_init failed\n", node->full_name); 159 + return ret; 160 + } 150 161 151 162 clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); 152 163 ··· 175 164 */ 176 165 clockevents_config_and_register(&moxart_clockevent, pclk, 177 166 0x4, 0xfffffffe); 167 + 168 + return 0; 178 169 } 179 170 CLOCKSOURCE_OF_DECLARE(moxart, 
"moxa,moxart-timer", moxart_timer_init);
+5 -3
drivers/clocksource/mps2-timer.c
··· 250 250 return ret; 251 251 } 252 252 253 - static void __init mps2_timer_init(struct device_node *np) 253 + static int __init mps2_timer_init(struct device_node *np) 254 254 { 255 255 static int has_clocksource, has_clockevent; 256 256 int ret; ··· 259 259 ret = mps2_clocksource_init(np); 260 260 if (!ret) { 261 261 has_clocksource = 1; 262 - return; 262 + return 0; 263 263 } 264 264 } 265 265 ··· 267 267 ret = mps2_clockevent_init(np); 268 268 if (!ret) { 269 269 has_clockevent = 1; 270 - return; 270 + return 0; 271 271 } 272 272 } 273 + 274 + return 0; 273 275 } 274 276 275 277 CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
+5 -3
drivers/clocksource/mtk_timer.c
··· 181 181 evt->gpt_base + GPT_IRQ_EN_REG); 182 182 } 183 183 184 - static void __init mtk_timer_init(struct device_node *node) 184 + static int __init mtk_timer_init(struct device_node *node) 185 185 { 186 186 struct mtk_clock_event_device *evt; 187 187 struct resource res; ··· 190 190 191 191 evt = kzalloc(sizeof(*evt), GFP_KERNEL); 192 192 if (!evt) 193 - return; 193 + return -ENOMEM; 194 194 195 195 evt->dev.name = "mtk_tick"; 196 196 evt->dev.rating = 300; ··· 248 248 249 249 mtk_timer_enable_irq(evt, GPT_CLK_EVT); 250 250 251 - return; 251 + return 0; 252 252 253 253 err_clk_disable: 254 254 clk_disable_unprepare(clk); ··· 262 262 release_mem_region(res.start, resource_size(&res)); 263 263 err_kzalloc: 264 264 kfree(evt); 265 + 266 + return -EINVAL; 265 267 } 266 268 CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
+17 -9
drivers/clocksource/mxs_timer.c
··· 31 31 #include <linux/stmp_device.h> 32 32 #include <linux/sched_clock.h> 33 33 34 - #include <asm/mach/time.h> 35 - 36 34 /* 37 35 * There are 2 versions of the timrot on Freescale MXS-based SoCs. 38 36 * The v1 on MX23 only gets 16 bits counter, while v2 on MX28 ··· 224 226 return 0; 225 227 } 226 228 227 - static void __init mxs_timer_init(struct device_node *np) 229 + static int __init mxs_timer_init(struct device_node *np) 228 230 { 229 231 struct clk *timer_clk; 230 - int irq; 232 + int irq, ret; 231 233 232 234 mxs_timrot_base = of_iomap(np, 0); 233 235 WARN_ON(!mxs_timrot_base); ··· 235 237 timer_clk = of_clk_get(np, 0); 236 238 if (IS_ERR(timer_clk)) { 237 239 pr_err("%s: failed to get clk\n", __func__); 238 - return; 240 + return PTR_ERR(timer_clk); 239 241 } 240 242 241 - clk_prepare_enable(timer_clk); 243 + ret = clk_prepare_enable(timer_clk); 244 + if (ret) 245 + return ret; 242 246 243 247 /* 244 248 * Initialize timers to a known state ··· 278 278 mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1)); 279 279 280 280 /* init and register the timer to the framework */ 281 - mxs_clocksource_init(timer_clk); 282 - mxs_clockevent_init(timer_clk); 281 + ret = mxs_clocksource_init(timer_clk); 282 + if (ret) 283 + return ret; 284 + 285 + ret = mxs_clockevent_init(timer_clk); 286 + if (ret) 287 + return ret; 283 288 284 289 /* Make irqs happen */ 285 290 irq = irq_of_parse_and_map(np, 0); 286 - setup_irq(irq, &mxs_timer_irq); 291 + if (irq <= 0) 292 + return -EINVAL; 293 + 294 + return setup_irq(irq, &mxs_timer_irq); 287 295 } 288 296 CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
+28 -15
drivers/clocksource/nomadik-mtu.c
··· 193 193 .dev_id = &nmdk_clkevt, 194 194 }; 195 195 196 - static void __init nmdk_timer_init(void __iomem *base, int irq, 196 + static int __init nmdk_timer_init(void __iomem *base, int irq, 197 197 struct clk *pclk, struct clk *clk) 198 198 { 199 199 unsigned long rate; 200 + int ret; 200 201 201 202 mtu_base = base; 202 203 ··· 227 226 /* Timer 0 is the free running clocksource */ 228 227 nmdk_clksrc_reset(); 229 228 230 - if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", 231 - rate, 200, 32, clocksource_mmio_readl_down)) 232 - pr_err("timer: failed to initialize clock source %s\n", 233 - "mtu_0"); 229 + ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", 230 + rate, 200, 32, clocksource_mmio_readl_down); 231 + if (ret) { 232 + pr_err("timer: failed to initialize clock source %s\n", "mtu_0"); 233 + return ret; 234 + } 234 235 235 236 #ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK 236 237 sched_clock_register(nomadik_read_sched_clock, 32, rate); ··· 247 244 mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer; 248 245 mtu_delay_timer.freq = rate; 249 246 register_current_timer_delay(&mtu_delay_timer); 247 + 248 + return 0; 250 249 } 251 250 252 - static void __init nmdk_timer_of_init(struct device_node *node) 251 + static int __init nmdk_timer_of_init(struct device_node *node) 253 252 { 254 253 struct clk *pclk; 255 254 struct clk *clk; ··· 259 254 int irq; 260 255 261 256 base = of_iomap(node, 0); 262 - if (!base) 263 - panic("Can't remap registers"); 257 + if (!base) { 258 + pr_err("Can't remap registers"); 259 + return -ENXIO; 260 + } 264 261 265 262 pclk = of_clk_get_by_name(node, "apb_pclk"); 266 - if (IS_ERR(pclk)) 267 - panic("could not get apb_pclk"); 263 + if (IS_ERR(pclk)) { 264 + pr_err("could not get apb_pclk"); 265 + return PTR_ERR(pclk); 266 + } 268 267 269 268 clk = of_clk_get_by_name(node, "timclk"); 270 - if (IS_ERR(clk)) 271 - panic("could not get timclk"); 269 + if (IS_ERR(clk)) { 270 + pr_err("could not get 
timclk"); 271 + return PTR_ERR(clk); 272 + } 272 273 273 274 irq = irq_of_parse_and_map(node, 0); 274 - if (irq <= 0) 275 - panic("Can't parse IRQ"); 275 + if (irq <= 0) { 276 + pr_err("Can't parse IRQ"); 277 + return -EINVAL; 278 + } 276 279 277 - nmdk_timer_init(base, irq, pclk, clk); 280 + return nmdk_timer_init(base, irq, pclk, clk); 278 281 } 279 282 CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu", 280 283 nmdk_timer_of_init);
+32 -12
drivers/clocksource/pxa_timer.c
··· 150 150 .dev_id = &ckevt_pxa_osmr0, 151 151 }; 152 152 153 - static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 153 + static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 154 154 { 155 + int ret; 156 + 155 157 timer_writel(0, OIER); 156 158 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 157 159 ··· 161 159 162 160 ckevt_pxa_osmr0.cpumask = cpumask_of(0); 163 161 164 - setup_irq(irq, &pxa_ost0_irq); 162 + ret = setup_irq(irq, &pxa_ost0_irq); 163 + if (ret) { 164 + pr_err("Failed to setup irq"); 165 + return ret; 166 + } 165 167 166 - clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, 167 - 32, clocksource_mmio_readl_up); 168 + ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, 169 + 32, clocksource_mmio_readl_up); 170 + if (ret) { 171 + pr_err("Failed to init clocksource"); 172 + return ret; 173 + } 174 + 168 175 clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, 169 176 MIN_OSCR_DELTA * 2, 0x7fffffff); 177 + 178 + return 0; 170 179 } 171 180 172 - static void __init pxa_timer_dt_init(struct device_node *np) 181 + static int __init pxa_timer_dt_init(struct device_node *np) 173 182 { 174 183 struct clk *clk; 175 - int irq; 184 + int irq, ret; 176 185 177 186 /* timer registers are shared with watchdog timer */ 178 187 timer_base = of_iomap(np, 0); 179 - if (!timer_base) 180 - panic("%s: unable to map resource\n", np->name); 188 + if (!timer_base) { 189 + pr_err("%s: unable to map resource\n", np->name); 190 + return -ENXIO; 191 + } 181 192 182 193 clk = of_clk_get(np, 0); 183 194 if (IS_ERR(clk)) { 184 195 pr_crit("%s: unable to get clk\n", np->name); 185 - return; 196 + return PTR_ERR(clk); 186 197 } 187 - clk_prepare_enable(clk); 198 + 199 + ret = clk_prepare_enable(clk); 200 + if (ret) { 201 + pr_crit("Failed to prepare clock"); 202 + return ret; 203 + } 188 204 189 205 /* we are only interested in OS-timer0 irq */ 190 206 irq = 
irq_of_parse_and_map(np, 0); 191 207 if (irq <= 0) { 192 208 pr_crit("%s: unable to parse OS-timer0 irq\n", np->name); 193 - return; 209 + return -EINVAL; 194 210 } 195 211 196 - pxa_timer_common_init(irq, clk_get_rate(clk)); 212 + return pxa_timer_common_init(irq, clk_get_rate(clk)); 197 213 } 198 214 CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); 199 215
+13 -10
drivers/clocksource/qcom-timer.c
··· 178 178 .read_current_timer = msm_read_current_timer, 179 179 }; 180 180 181 - static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, 181 + static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, 182 182 bool percpu) 183 183 { 184 184 struct clocksource *cs = &msm_clocksource; ··· 218 218 sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); 219 219 msm_delay_timer.freq = dgt_hz; 220 220 register_current_timer_delay(&msm_delay_timer); 221 + 222 + return res; 221 223 } 222 224 223 - static void __init msm_dt_timer_init(struct device_node *np) 225 + static int __init msm_dt_timer_init(struct device_node *np) 224 226 { 225 227 u32 freq; 226 - int irq; 228 + int irq, ret; 227 229 struct resource res; 228 230 u32 percpu_offset; 229 231 void __iomem *base; ··· 234 232 base = of_iomap(np, 0); 235 233 if (!base) { 236 234 pr_err("Failed to map event base\n"); 237 - return; 235 + return -ENXIO; 238 236 } 239 237 240 238 /* We use GPT0 for the clockevent */ 241 239 irq = irq_of_parse_and_map(np, 1); 242 240 if (irq <= 0) { 243 241 pr_err("Can't get irq\n"); 244 - return; 242 + return -EINVAL; 245 243 } 246 244 247 245 /* We use CPU0's DGT for the clocksource */ 248 246 if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) 249 247 percpu_offset = 0; 250 248 251 - if (of_address_to_resource(np, 0, &res)) { 249 + ret = of_address_to_resource(np, 0, &res); 250 + if (ret) { 252 251 pr_err("Failed to parse DGT resource\n"); 253 - return; 252 + return ret; 254 253 } 255 254 256 255 cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); 257 256 if (!cpu0_base) { 258 257 pr_err("Failed to map source base\n"); 259 - return; 258 + return -EINVAL; 260 259 } 261 260 262 261 if (of_property_read_u32(np, "clock-frequency", &freq)) { 263 262 pr_err("Unknown frequency\n"); 264 - return; 263 + return -EINVAL; 265 264 } 266 265 267 266 event_base = base + 0x4; ··· 271 268 freq /= 4; 272 269 writel_relaxed(DGT_CLK_CTL_DIV_4, 
source_base + DGT_CLK_CTL); 273 270 274 - msm_timer_init(freq, 32, irq, !!percpu_offset); 271 + return msm_timer_init(freq, 32, irq, !!percpu_offset); 275 272 } 276 273 CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); 277 274 CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
+41 -12
drivers/clocksource/rockchip_timer.c
··· 19 19 20 20 #define TIMER_LOAD_COUNT0 0x00 21 21 #define TIMER_LOAD_COUNT1 0x04 22 - #define TIMER_CONTROL_REG 0x10 22 + #define TIMER_CONTROL_REG3288 0x10 23 + #define TIMER_CONTROL_REG3399 0x1c 23 24 #define TIMER_INT_STATUS 0x18 24 25 25 26 #define TIMER_DISABLE 0x0 ··· 32 31 struct bc_timer { 33 32 struct clock_event_device ce; 34 33 void __iomem *base; 34 + void __iomem *ctrl; 35 35 u32 freq; 36 36 }; 37 37 ··· 48 46 return rk_timer(ce)->base; 49 47 } 50 48 49 + static inline void __iomem *rk_ctrl(struct clock_event_device *ce) 50 + { 51 + return rk_timer(ce)->ctrl; 52 + } 53 + 51 54 static inline void rk_timer_disable(struct clock_event_device *ce) 52 55 { 53 - writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG); 56 + writel_relaxed(TIMER_DISABLE, rk_ctrl(ce)); 54 57 } 55 58 56 59 static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags) 57 60 { 58 61 writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags, 59 - rk_base(ce) + TIMER_CONTROL_REG); 62 + rk_ctrl(ce)); 60 63 } 61 64 62 65 static void rk_timer_update_counter(unsigned long cycles, ··· 113 106 return IRQ_HANDLED; 114 107 } 115 108 116 - static void __init rk_timer_init(struct device_node *np) 109 + static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg) 117 110 { 118 111 struct clock_event_device *ce = &bc_timer.ce; 119 112 struct clk *timer_clk; 120 113 struct clk *pclk; 121 - int ret, irq; 114 + int ret = -EINVAL, irq; 122 115 123 116 bc_timer.base = of_iomap(np, 0); 124 117 if (!bc_timer.base) { 125 118 pr_err("Failed to get base address for '%s'\n", TIMER_NAME); 126 - return; 119 + return -ENXIO; 127 120 } 121 + bc_timer.ctrl = bc_timer.base + ctrl_reg; 128 122 129 123 pclk = of_clk_get_by_name(np, "pclk"); 130 124 if (IS_ERR(pclk)) { 125 + ret = PTR_ERR(pclk); 131 126 pr_err("Failed to get pclk for '%s'\n", TIMER_NAME); 132 127 goto out_unmap; 133 128 } 134 129 135 - if (clk_prepare_enable(pclk)) { 130 + ret = clk_prepare_enable(pclk); 131 + if 
(ret) { 136 132 pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); 137 133 goto out_unmap; 138 134 } 139 135 140 136 timer_clk = of_clk_get_by_name(np, "timer"); 141 137 if (IS_ERR(timer_clk)) { 138 + ret = PTR_ERR(timer_clk); 142 139 pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME); 143 140 goto out_timer_clk; 144 141 } 145 142 146 - if (clk_prepare_enable(timer_clk)) { 143 + ret = clk_prepare_enable(timer_clk); 144 + if (ret) { 147 145 pr_err("Failed to enable timer clock\n"); 148 146 goto out_timer_clk; 149 147 } ··· 157 145 158 146 irq = irq_of_parse_and_map(np, 0); 159 147 if (!irq) { 148 + ret = -EINVAL; 160 149 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); 161 150 goto out_irq; 162 151 } 163 152 164 153 ce->name = TIMER_NAME; 165 - ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 154 + ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 155 + CLOCK_EVT_FEAT_DYNIRQ; 166 156 ce->set_next_event = rk_timer_set_next_event; 167 157 ce->set_state_shutdown = rk_timer_shutdown; 168 158 ce->set_state_periodic = rk_timer_set_periodic; 169 159 ce->irq = irq; 170 - ce->cpumask = cpumask_of(0); 160 + ce->cpumask = cpu_possible_mask; 171 161 ce->rating = 250; 172 162 173 163 rk_timer_interrupt_clear(ce); ··· 183 169 184 170 clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); 185 171 186 - return; 172 + return 0; 187 173 188 174 out_irq: 189 175 clk_disable_unprepare(timer_clk); ··· 191 177 clk_disable_unprepare(pclk); 192 178 out_unmap: 193 179 iounmap(bc_timer.base); 180 + 181 + return ret; 194 182 } 195 183 196 - CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); 184 + static int __init rk3288_timer_init(struct device_node *np) 185 + { 186 + return rk_timer_init(np, TIMER_CONTROL_REG3288); 187 + } 188 + 189 + static int __init rk3399_timer_init(struct device_node *np) 190 + { 191 + return rk_timer_init(np, TIMER_CONTROL_REG3399); 192 + } 193 + 194 + 
CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", 195 + rk3288_timer_init); 196 + CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", 197 + rk3399_timer_init);
+37 -33
drivers/clocksource/samsung_pwm_timer.c
··· 130 130 131 131 spin_lock_irqsave(&samsung_pwm_lock, flags); 132 132 133 - tcon = __raw_readl(pwm.base + REG_TCON); 133 + tcon = readl_relaxed(pwm.base + REG_TCON); 134 134 tcon &= ~TCON_START(channel); 135 - __raw_writel(tcon, pwm.base + REG_TCON); 135 + writel_relaxed(tcon, pwm.base + REG_TCON); 136 136 137 137 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 138 138 } ··· 148 148 149 149 spin_lock_irqsave(&samsung_pwm_lock, flags); 150 150 151 - tcon = __raw_readl(pwm.base + REG_TCON); 151 + tcon = readl_relaxed(pwm.base + REG_TCON); 152 152 153 153 tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan)); 154 154 tcon |= TCON_MANUALUPDATE(tcon_chan); 155 155 156 - __raw_writel(tcnt, pwm.base + REG_TCNTB(channel)); 157 - __raw_writel(tcnt, pwm.base + REG_TCMPB(channel)); 158 - __raw_writel(tcon, pwm.base + REG_TCON); 156 + writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel)); 157 + writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel)); 158 + writel_relaxed(tcon, pwm.base + REG_TCON); 159 159 160 160 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 161 161 } ··· 170 170 171 171 spin_lock_irqsave(&samsung_pwm_lock, flags); 172 172 173 - tcon = __raw_readl(pwm.base + REG_TCON); 173 + tcon = readl_relaxed(pwm.base + REG_TCON); 174 174 175 175 tcon &= ~TCON_MANUALUPDATE(channel); 176 176 tcon |= TCON_START(channel); ··· 180 180 else 181 181 tcon &= ~TCON_AUTORELOAD(channel); 182 182 183 - __raw_writel(tcon, pwm.base + REG_TCON); 183 + writel_relaxed(tcon, pwm.base + REG_TCON); 184 184 185 185 spin_unlock_irqrestore(&samsung_pwm_lock, flags); 186 186 } ··· 333 333 return samsung_clocksource_read(NULL); 334 334 } 335 335 336 - static void __init samsung_clocksource_init(void) 336 + static int __init samsung_clocksource_init(void) 337 337 { 338 338 unsigned long pclk; 339 339 unsigned long clock_rate; 340 - int ret; 341 340 342 341 pclk = clk_get_rate(pwm.timerclk); 343 342 ··· 357 358 pwm.variant.bits, clock_rate); 358 359 359 360 samsung_clocksource.mask = 
CLOCKSOURCE_MASK(pwm.variant.bits); 360 - ret = clocksource_register_hz(&samsung_clocksource, clock_rate); 361 - if (ret) 362 - panic("samsung_clocksource_timer: can't register clocksource\n"); 361 + return clocksource_register_hz(&samsung_clocksource, clock_rate); 363 362 } 364 363 365 364 static void __init samsung_timer_resources(void) ··· 377 380 /* 378 381 * PWM master driver 379 382 */ 380 - static void __init _samsung_pwm_clocksource_init(void) 383 + static int __init _samsung_pwm_clocksource_init(void) 381 384 { 382 385 u8 mask; 383 386 int channel; 384 387 385 388 mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1); 386 389 channel = fls(mask) - 1; 387 - if (channel < 0) 388 - panic("failed to find PWM channel for clocksource"); 390 + if (channel < 0) { 391 + pr_crit("failed to find PWM channel for clocksource"); 392 + return -EINVAL; 393 + } 389 394 pwm.source_id = channel; 390 395 391 396 mask &= ~(1 << channel); 392 397 channel = fls(mask) - 1; 393 - if (channel < 0) 394 - panic("failed to find PWM channel for clock event"); 398 + if (channel < 0) { 399 + pr_crit("failed to find PWM channel for clock event"); 400 + return -EINVAL; 401 + } 395 402 pwm.event_id = channel; 396 403 397 404 samsung_timer_resources(); 398 405 samsung_clockevent_init(); 399 - samsung_clocksource_init(); 406 + 407 + return samsung_clocksource_init(); 400 408 } 401 409 402 410 void __init samsung_pwm_clocksource_init(void __iomem *base, ··· 419 417 } 420 418 421 419 #ifdef CONFIG_CLKSRC_OF 422 - static void __init samsung_pwm_alloc(struct device_node *np, 423 - const struct samsung_pwm_variant *variant) 420 + static int __init samsung_pwm_alloc(struct device_node *np, 421 + const struct samsung_pwm_variant *variant) 424 422 { 425 423 struct property *prop; 426 424 const __be32 *cur; ··· 443 441 pwm.base = of_iomap(np, 0); 444 442 if (!pwm.base) { 445 443 pr_err("%s: failed to map PWM registers\n", __func__); 446 - return; 444 + return -ENXIO; 447 445 } 448 446 449 447 
pwm.timerclk = of_clk_get_by_name(np, "timers"); 450 - if (IS_ERR(pwm.timerclk)) 451 - panic("failed to get timers clock for timer"); 448 + if (IS_ERR(pwm.timerclk)) { 449 + pr_crit("failed to get timers clock for timer"); 450 + return PTR_ERR(pwm.timerclk); 451 + } 452 452 453 - _samsung_pwm_clocksource_init(); 453 + return _samsung_pwm_clocksource_init(); 454 454 } 455 455 456 456 static const struct samsung_pwm_variant s3c24xx_variant = { ··· 462 458 .tclk_mask = (1 << 4), 463 459 }; 464 460 465 - static void __init s3c2410_pwm_clocksource_init(struct device_node *np) 461 + static int __init s3c2410_pwm_clocksource_init(struct device_node *np) 466 462 { 467 - samsung_pwm_alloc(np, &s3c24xx_variant); 463 + return samsung_pwm_alloc(np, &s3c24xx_variant); 468 464 } 469 465 CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init); 470 466 ··· 475 471 .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5), 476 472 }; 477 473 478 - static void __init s3c64xx_pwm_clocksource_init(struct device_node *np) 474 + static int __init s3c64xx_pwm_clocksource_init(struct device_node *np) 479 475 { 480 - samsung_pwm_alloc(np, &s3c64xx_variant); 476 + return samsung_pwm_alloc(np, &s3c64xx_variant); 481 477 } 482 478 CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init); 483 479 ··· 488 484 .tclk_mask = 0, 489 485 }; 490 486 491 - static void __init s5p64x0_pwm_clocksource_init(struct device_node *np) 487 + static int __init s5p64x0_pwm_clocksource_init(struct device_node *np) 492 488 { 493 - samsung_pwm_alloc(np, &s5p64x0_variant); 489 + return samsung_pwm_alloc(np, &s5p64x0_variant); 494 490 } 495 491 CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init); 496 492 ··· 501 497 .tclk_mask = (1 << 5), 502 498 }; 503 499 504 - static void __init s5p_pwm_clocksource_init(struct device_node *np) 500 + static int __init s5p_pwm_clocksource_init(struct device_node *np) 505 501 { 506 - 
samsung_pwm_alloc(np, &s5p_variant); 502 + return samsung_pwm_alloc(np, &s5p_variant); 507 503 } 508 504 CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init); 509 505 #endif
+31 -12
drivers/clocksource/sun4i_timer.c
··· 146 146 return ~readl(timer_base + TIMER_CNTVAL_REG(1)); 147 147 } 148 148 149 - static void __init sun4i_timer_init(struct device_node *node) 149 + static int __init sun4i_timer_init(struct device_node *node) 150 150 { 151 151 unsigned long rate = 0; 152 152 struct clk *clk; ··· 154 154 u32 val; 155 155 156 156 timer_base = of_iomap(node, 0); 157 - if (!timer_base) 158 - panic("Can't map registers"); 157 + if (!timer_base) { 158 + pr_crit("Can't map registers"); 159 + return -ENXIO; 160 + } 159 161 160 162 irq = irq_of_parse_and_map(node, 0); 161 - if (irq <= 0) 162 - panic("Can't parse IRQ"); 163 + if (irq <= 0) { 164 + pr_crit("Can't parse IRQ"); 165 + return -EINVAL; 166 + } 163 167 164 168 clk = of_clk_get(node, 0); 165 - if (IS_ERR(clk)) 166 - panic("Can't get timer clock"); 167 - clk_prepare_enable(clk); 169 + if (IS_ERR(clk)) { 170 + pr_crit("Can't get timer clock"); 171 + return PTR_ERR(clk); 172 + } 173 + 174 + ret = clk_prepare_enable(clk); 175 + if (ret) { 176 + pr_err("Failed to prepare clock"); 177 + return ret; 178 + } 168 179 169 180 rate = clk_get_rate(clk); 170 181 ··· 193 182 of_machine_is_compatible("allwinner,sun5i-a10s")) 194 183 sched_clock_register(sun4i_timer_sched_read, 32, rate); 195 184 196 - clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, 197 - rate, 350, 32, clocksource_mmio_readl_down); 185 + ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, 186 + rate, 350, 32, clocksource_mmio_readl_down); 187 + if (ret) { 188 + pr_err("Failed to register clocksource"); 189 + return ret; 190 + } 198 191 199 192 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 200 193 ··· 215 200 TIMER_SYNC_TICKS, 0xffffffff); 216 201 217 202 ret = setup_irq(irq, &sun4i_timer_irq); 218 - if (ret) 219 - pr_warn("failed to setup irq %d\n", irq); 203 + if (ret) { 204 + pr_err("failed to setup irq %d\n", irq); 205 + return ret; 206 + } 220 207 221 208 /* Enable timer0 interrupt */ 222 209 val = readl(timer_base + 
TIMER_IRQ_EN_REG); 223 210 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 211 + 212 + return ret; 224 213 } 225 214 CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", 226 215 sun4i_timer_init);
+6 -4
drivers/clocksource/tango_xtal.c
··· 19 19 return read_xtal_counter(); 20 20 } 21 21 22 - static void __init tango_clocksource_init(struct device_node *np) 22 + static int __init tango_clocksource_init(struct device_node *np) 23 23 { 24 24 struct clk *clk; 25 25 int xtal_freq, ret; ··· 27 27 xtal_in_cnt = of_iomap(np, 0); 28 28 if (xtal_in_cnt == NULL) { 29 29 pr_err("%s: invalid address\n", np->full_name); 30 - return; 30 + return -ENXIO; 31 31 } 32 32 33 33 clk = of_clk_get(np, 0); 34 34 if (IS_ERR(clk)) { 35 35 pr_err("%s: invalid clock\n", np->full_name); 36 - return; 36 + return PTR_ERR(clk); 37 37 } 38 38 39 39 xtal_freq = clk_get_rate(clk); ··· 44 44 32, clocksource_mmio_readl_up); 45 45 if (ret) { 46 46 pr_err("%s: registration failed\n", np->full_name); 47 - return; 47 + return ret; 48 48 } 49 49 50 50 sched_clock_register(read_sched_clock, 32, xtal_freq); 51 51 register_current_timer_delay(&delay_timer); 52 + 53 + return 0; 52 54 } 53 55 54 56 CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
+14 -10
drivers/clocksource/tegra20_timer.c
··· 165 165 .dev_id = &tegra_clockevent, 166 166 }; 167 167 168 - static void __init tegra20_init_timer(struct device_node *np) 168 + static int __init tegra20_init_timer(struct device_node *np) 169 169 { 170 170 struct clk *clk; 171 171 unsigned long rate; ··· 174 174 timer_reg_base = of_iomap(np, 0); 175 175 if (!timer_reg_base) { 176 176 pr_err("Can't map timer registers\n"); 177 - BUG(); 177 + return -ENXIO; 178 178 } 179 179 180 180 tegra_timer_irq.irq = irq_of_parse_and_map(np, 2); 181 181 if (tegra_timer_irq.irq <= 0) { 182 182 pr_err("Failed to map timer IRQ\n"); 183 - BUG(); 183 + return -EINVAL; 184 184 } 185 185 186 186 clk = of_clk_get(np, 0); ··· 211 211 212 212 sched_clock_register(tegra_read_sched_clock, 32, 1000000); 213 213 214 - if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, 215 - "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) { 214 + ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, 215 + "timer_us", 1000000, 300, 32, 216 + clocksource_mmio_readl_up); 217 + if (ret) { 216 218 pr_err("Failed to register clocksource\n"); 217 - BUG(); 219 + return ret; 218 220 } 219 221 220 222 tegra_delay_timer.read_current_timer = ··· 227 225 ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq); 228 226 if (ret) { 229 227 pr_err("Failed to register timer IRQ: %d\n", ret); 230 - BUG(); 228 + return ret; 231 229 } 232 230 233 231 tegra_clockevent.cpumask = cpu_all_mask; 234 232 tegra_clockevent.irq = tegra_timer_irq.irq; 235 233 clockevents_config_and_register(&tegra_clockevent, 1000000, 236 234 0x1, 0x1fffffff); 235 + 236 + return 0; 237 237 } 238 238 CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer); 239 239 240 - static void __init tegra20_init_rtc(struct device_node *np) 240 + static int __init tegra20_init_rtc(struct device_node *np) 241 241 { 242 242 struct clk *clk; 243 243 244 244 rtc_base = of_iomap(np, 0); 245 245 if (!rtc_base) { 246 246 pr_err("Can't map RTC registers"); 247 - BUG(); 
247 + return -ENXIO; 248 248 } 249 249 250 250 /* ··· 259 255 else 260 256 clk_prepare_enable(clk); 261 257 262 - register_persistent_clock(NULL, tegra_read_persistent_clock64); 258 + return register_persistent_clock(NULL, tegra_read_persistent_clock64); 263 259 } 264 260 CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
+74 -24
drivers/clocksource/time-armada-370-xp.c
··· 246 246 writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); 247 247 } 248 248 249 - struct syscore_ops armada_370_xp_timer_syscore_ops = { 249 + static struct syscore_ops armada_370_xp_timer_syscore_ops = { 250 250 .suspend = armada_370_xp_timer_suspend, 251 251 .resume = armada_370_xp_timer_resume, 252 252 }; ··· 260 260 .read_current_timer = armada_370_delay_timer_read, 261 261 }; 262 262 263 - static void __init armada_370_xp_timer_common_init(struct device_node *np) 263 + static int __init armada_370_xp_timer_common_init(struct device_node *np) 264 264 { 265 265 u32 clr = 0, set = 0; 266 266 int res; 267 267 268 268 timer_base = of_iomap(np, 0); 269 - WARN_ON(!timer_base); 269 + if (!timer_base) { 270 + pr_err("Failed to iomap"); 271 + return -ENXIO; 272 + } 273 + 270 274 local_base = of_iomap(np, 1); 275 + if (!local_base) { 276 + pr_err("Failed to iomap"); 277 + return -ENXIO; 278 + } 271 279 272 280 if (timer25Mhz) { 273 281 set = TIMER0_25MHZ; ··· 314 306 */ 315 307 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); 316 308 317 - clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, 318 - "armada_370_xp_clocksource", 319 - timer_clk, 300, 32, clocksource_mmio_readl_down); 309 + res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, 310 + "armada_370_xp_clocksource", 311 + timer_clk, 300, 32, clocksource_mmio_readl_down); 312 + if (res) { 313 + pr_err("Failed to initialize clocksource mmio"); 314 + return res; 315 + } 320 316 321 317 register_cpu_notifier(&armada_370_xp_timer_cpu_nb); 322 318 323 319 armada_370_xp_evt = alloc_percpu(struct clock_event_device); 324 - 320 + if (!armada_370_xp_evt) 321 + return -ENOMEM; 325 322 326 323 /* 327 324 * Setup clockevent timer (interrupt-driven). 
··· 336 323 "armada_370_xp_per_cpu_tick", 337 324 armada_370_xp_evt); 338 325 /* Immediately configure the timer on the boot CPU */ 339 - if (!res) 340 - armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); 326 + if (res) { 327 + pr_err("Failed to request percpu irq"); 328 + return res; 329 + } 330 + 331 + res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); 332 + if (res) { 333 + pr_err("Failed to setup timer"); 334 + return res; 335 + } 341 336 342 337 register_syscore_ops(&armada_370_xp_timer_syscore_ops); 338 + 339 + return 0; 343 340 } 344 341 345 - static void __init armada_xp_timer_init(struct device_node *np) 342 + static int __init armada_xp_timer_init(struct device_node *np) 346 343 { 347 344 struct clk *clk = of_clk_get_by_name(np, "fixed"); 345 + int ret; 348 346 349 - /* The 25Mhz fixed clock is mandatory, and must always be available */ 350 - BUG_ON(IS_ERR(clk)); 351 - clk_prepare_enable(clk); 347 + clk = of_clk_get(np, 0); 348 + if (IS_ERR(clk)) { 349 + pr_err("Failed to get clock"); 350 + return PTR_ERR(clk); 351 + } 352 + 353 + ret = clk_prepare_enable(clk); 354 + if (ret) 355 + return ret; 356 + 352 357 timer_clk = clk_get_rate(clk); 353 358 354 - armada_370_xp_timer_common_init(np); 359 + return armada_370_xp_timer_common_init(np); 355 360 } 356 361 CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", 357 362 armada_xp_timer_init); 358 363 359 - static void __init armada_375_timer_init(struct device_node *np) 364 + static int __init armada_375_timer_init(struct device_node *np) 360 365 { 361 366 struct clk *clk; 367 + int ret; 362 368 363 369 clk = of_clk_get_by_name(np, "fixed"); 364 370 if (!IS_ERR(clk)) { 365 - clk_prepare_enable(clk); 371 + ret = clk_prepare_enable(clk); 372 + if (ret) 373 + return ret; 366 374 timer_clk = clk_get_rate(clk); 367 375 } else { 368 376 ··· 394 360 clk = of_clk_get(np, 0); 395 361 396 362 /* Must have at least a clock */ 397 - BUG_ON(IS_ERR(clk)); 398 - clk_prepare_enable(clk); 363 + 
if (IS_ERR(clk)) { 364 + pr_err("Failed to get clock"); 365 + return PTR_ERR(clk); 366 + } 367 + 368 + ret = clk_prepare_enable(clk); 369 + if (ret) 370 + return ret; 371 + 399 372 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; 400 373 timer25Mhz = false; 401 374 } 402 375 403 - armada_370_xp_timer_common_init(np); 376 + return armada_370_xp_timer_common_init(np); 404 377 } 405 378 CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer", 406 379 armada_375_timer_init); 407 380 408 - static void __init armada_370_timer_init(struct device_node *np) 381 + static int __init armada_370_timer_init(struct device_node *np) 409 382 { 410 - struct clk *clk = of_clk_get(np, 0); 383 + struct clk *clk; 384 + int ret; 411 385 412 - BUG_ON(IS_ERR(clk)); 413 - clk_prepare_enable(clk); 386 + clk = of_clk_get(np, 0); 387 + if (IS_ERR(clk)) { 388 + pr_err("Failed to get clock"); 389 + return PTR_ERR(clk); 390 + } 391 + 392 + ret = clk_prepare_enable(clk); 393 + if (ret) 394 + return ret; 395 + 414 396 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; 415 397 timer25Mhz = false; 416 398 417 - armada_370_xp_timer_common_init(np); 399 + return armada_370_xp_timer_common_init(np); 418 400 } 419 401 CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer", 420 402 armada_370_timer_init);
+12 -5
drivers/clocksource/time-efm32.c
··· 233 233 DIV_ROUND_CLOSEST(rate, 1024), 234 234 0xf, 0xffff); 235 235 236 - setup_irq(irq, &efm32_clock_event_irq); 236 + ret = setup_irq(irq, &efm32_clock_event_irq); 237 + if (ret) { 238 + pr_err("Failed setup irq"); 239 + goto err_setup_irq; 240 + } 237 241 238 242 return 0; 239 243 244 + err_setup_irq: 240 245 err_get_irq: 241 246 242 247 iounmap(base); ··· 260 255 * This function asserts that we have exactly one clocksource and one 261 256 * clock_event_device in the end. 262 257 */ 263 - static void __init efm32_timer_init(struct device_node *np) 258 + static int __init efm32_timer_init(struct device_node *np) 264 259 { 265 260 static int has_clocksource, has_clockevent; 266 - int ret; 261 + int ret = 0; 267 262 268 263 if (!has_clocksource) { 269 264 ret = efm32_clocksource_init(np); 270 265 if (!ret) { 271 266 has_clocksource = 1; 272 - return; 267 + return 0; 273 268 } 274 269 } 275 270 ··· 277 272 ret = efm32_clockevent_init(np); 278 273 if (!ret) { 279 274 has_clockevent = 1; 280 - return; 275 + return 0; 281 276 } 282 277 } 278 + 279 + return ret; 283 280 } 284 281 CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); 285 282 CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
+6 -4
drivers/clocksource/time-lpc32xx.c
··· 288 288 * This function asserts that we have exactly one clocksource and one 289 289 * clock_event_device in the end. 290 290 */ 291 - static void __init lpc32xx_timer_init(struct device_node *np) 291 + static int __init lpc32xx_timer_init(struct device_node *np) 292 292 { 293 293 static int has_clocksource, has_clockevent; 294 - int ret; 294 + int ret = 0; 295 295 296 296 if (!has_clocksource) { 297 297 ret = lpc32xx_clocksource_init(np); 298 298 if (!ret) { 299 299 has_clocksource = 1; 300 - return; 300 + return 0; 301 301 } 302 302 } 303 303 ··· 305 305 ret = lpc32xx_clockevent_init(np); 306 306 if (!ret) { 307 307 has_clockevent = 1; 308 - return; 308 + return 0; 309 309 } 310 310 } 311 + 312 + return ret; 311 313 } 312 314 CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
+36 -14
drivers/clocksource/time-orion.c
··· 104 104 .handler = orion_clkevt_irq_handler, 105 105 }; 106 106 107 - static void __init orion_timer_init(struct device_node *np) 107 + static int __init orion_timer_init(struct device_node *np) 108 108 { 109 109 struct clk *clk; 110 - int irq; 110 + int irq, ret; 111 111 112 112 /* timer registers are shared with watchdog timer */ 113 113 timer_base = of_iomap(np, 0); 114 - if (!timer_base) 115 - panic("%s: unable to map resource\n", np->name); 114 + if (!timer_base) { 115 + pr_err("%s: unable to map resource\n", np->name); 116 + return -ENXIO; 117 + } 116 118 117 119 clk = of_clk_get(np, 0); 118 - if (IS_ERR(clk)) 119 - panic("%s: unable to get clk\n", np->name); 120 - clk_prepare_enable(clk); 120 + if (IS_ERR(clk)) { 121 + pr_err("%s: unable to get clk\n", np->name); 122 + return PTR_ERR(clk); 123 + } 124 + 125 + ret = clk_prepare_enable(clk); 126 + if (ret) { 127 + pr_err("Failed to prepare clock"); 128 + return ret; 129 + } 121 130 122 131 /* we are only interested in timer1 irq */ 123 132 irq = irq_of_parse_and_map(np, 1); 124 - if (irq <= 0) 125 - panic("%s: unable to parse timer1 irq\n", np->name); 133 + if (irq <= 0) { 134 + pr_err("%s: unable to parse timer1 irq\n", np->name); 135 + return -EINVAL; 136 + } 126 137 127 138 /* setup timer0 as free-running clocksource */ 128 139 writel(~0, timer_base + TIMER0_VAL); ··· 141 130 atomic_io_modify(timer_base + TIMER_CTRL, 142 131 TIMER0_RELOAD_EN | TIMER0_EN, 143 132 TIMER0_RELOAD_EN | TIMER0_EN); 144 - clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", 145 - clk_get_rate(clk), 300, 32, 146 - clocksource_mmio_readl_down); 133 + 134 + ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", 135 + clk_get_rate(clk), 300, 32, 136 + clocksource_mmio_readl_down); 137 + if (ret) { 138 + pr_err("Failed to initialize mmio timer"); 139 + return ret; 140 + } 141 + 147 142 sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); 148 143 149 144 /* setup timer1 as clockevent 
timer */ 150 - if (setup_irq(irq, &orion_clkevt_irq)) 151 - panic("%s: unable to setup irq\n", np->name); 145 + ret = setup_irq(irq, &orion_clkevt_irq); 146 + if (ret) { 147 + pr_err("%s: unable to setup irq\n", np->name); 148 + return ret; 149 + } 152 150 153 151 ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; 154 152 orion_clkevt.cpumask = cpumask_of(0); 155 153 orion_clkevt.irq = irq; 156 154 clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), 157 155 ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); 156 + 157 + return 0; 158 158 } 159 159 CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
+9 -9
drivers/clocksource/time-pistachio.c
··· 148 148 }, 149 149 }; 150 150 151 - static void __init pistachio_clksrc_of_init(struct device_node *node) 151 + static int __init pistachio_clksrc_of_init(struct device_node *node) 152 152 { 153 153 struct clk *sys_clk, *fast_clk; 154 154 struct regmap *periph_regs; ··· 158 158 pcs_gpt.base = of_iomap(node, 0); 159 159 if (!pcs_gpt.base) { 160 160 pr_err("cannot iomap\n"); 161 - return; 161 + return -ENXIO; 162 162 } 163 163 164 164 periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); 165 165 if (IS_ERR(periph_regs)) { 166 166 pr_err("cannot get peripheral regmap (%ld)\n", 167 167 PTR_ERR(periph_regs)); 168 - return; 168 + return PTR_ERR(periph_regs); 169 169 } 170 170 171 171 /* Switch to using the fast counter clock */ 172 172 ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, 173 173 0xf, 0x0); 174 174 if (ret) 175 - return; 175 + return ret; 176 176 177 177 sys_clk = of_clk_get_by_name(node, "sys"); 178 178 if (IS_ERR(sys_clk)) { 179 179 pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk)); 180 - return; 180 + return PTR_ERR(sys_clk); 181 181 } 182 182 183 183 fast_clk = of_clk_get_by_name(node, "fast"); 184 184 if (IS_ERR(fast_clk)) { 185 185 pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk)); 186 - return; 186 + return PTR_ERR(fast_clk); 187 187 } 188 188 189 189 ret = clk_prepare_enable(sys_clk); 190 190 if (ret < 0) { 191 191 pr_err("failed to enable clock (%d)\n", ret); 192 - return; 192 + return ret; 193 193 } 194 194 195 195 ret = clk_prepare_enable(fast_clk); 196 196 if (ret < 0) { 197 197 pr_err("failed to enable clock (%d)\n", ret); 198 198 clk_disable_unprepare(sys_clk); 199 - return; 199 + return ret; 200 200 } 201 201 202 202 rate = clk_get_rate(fast_clk); ··· 212 212 213 213 raw_spin_lock_init(&pcs_gpt.lock); 214 214 sched_clock_register(pistachio_read_sched_clock, 32, rate); 215 - clocksource_register_hz(&pcs_gpt.cs, rate); 215 + return clocksource_register_hz(&pcs_gpt.cs, rate); 216 216 } 217 217 
CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer", 218 218 pistachio_clksrc_of_init);
+18 -12
drivers/clocksource/timer-atlas7.c
··· 238 238 .notifier_call = sirfsoc_cpu_notify, 239 239 }; 240 240 241 - static void __init sirfsoc_clockevent_init(void) 241 + static int __init sirfsoc_clockevent_init(void) 242 242 { 243 243 sirfsoc_clockevent = alloc_percpu(struct clock_event_device); 244 244 BUG_ON(!sirfsoc_clockevent); ··· 246 246 BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); 247 247 248 248 /* Immediately configure the timer on the boot CPU */ 249 - sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); 249 + return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); 250 250 } 251 251 252 252 /* initialize the kernel jiffy timer source */ 253 - static void __init sirfsoc_atlas7_timer_init(struct device_node *np) 253 + static int __init sirfsoc_atlas7_timer_init(struct device_node *np) 254 254 { 255 255 struct clk *clk; 256 256 ··· 279 279 280 280 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate)); 281 281 282 - sirfsoc_clockevent_init(); 282 + return sirfsoc_clockevent_init(); 283 283 } 284 284 285 - static void __init sirfsoc_of_timer_init(struct device_node *np) 285 + static int __init sirfsoc_of_timer_init(struct device_node *np) 286 286 { 287 287 sirfsoc_timer_base = of_iomap(np, 0); 288 - if (!sirfsoc_timer_base) 289 - panic("unable to map timer cpu registers\n"); 288 + if (!sirfsoc_timer_base) { 289 + pr_err("unable to map timer cpu registers\n"); 290 + return -ENXIO; 291 + } 290 292 291 293 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 292 - if (!sirfsoc_timer_irq.irq) 293 - panic("No irq passed for timer0 via DT\n"); 294 + if (!sirfsoc_timer_irq.irq) { 295 + pr_err("No irq passed for timer0 via DT\n"); 296 + return -EINVAL; 297 + } 294 298 295 299 sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); 296 - if (!sirfsoc_timer1_irq.irq) 297 - panic("No irq passed for timer1 via DT\n"); 300 + if (!sirfsoc_timer1_irq.irq) { 301 + pr_err("No irq passed for timer1 via DT\n"); 302 + return -EINVAL; 303 + } 298 304 299 - 
sirfsoc_atlas7_timer_init(np); 305 + return sirfsoc_atlas7_timer_init(np); 300 306 } 301 307 CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
+28 -13
drivers/clocksource/timer-atmel-pit.c
··· 177 177 /* 178 178 * Set up both clocksource and clockevent support. 179 179 */ 180 - static void __init at91sam926x_pit_common_init(struct pit_data *data) 180 + static int __init at91sam926x_pit_common_init(struct pit_data *data) 181 181 { 182 182 unsigned long pit_rate; 183 183 unsigned bits; ··· 204 204 data->clksrc.rating = 175; 205 205 data->clksrc.read = read_pit_clk; 206 206 data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; 207 - clocksource_register_hz(&data->clksrc, pit_rate); 207 + 208 + ret = clocksource_register_hz(&data->clksrc, pit_rate); 209 + if (ret) { 210 + pr_err("Failed to register clocksource"); 211 + return ret; 212 + } 208 213 209 214 /* Set up irq handler */ 210 215 ret = request_irq(data->irq, at91sam926x_pit_interrupt, 211 216 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, 212 217 "at91_tick", data); 213 - if (ret) 214 - panic(pr_fmt("Unable to setup IRQ\n")); 218 + if (ret) { 219 + pr_err("Unable to setup IRQ\n"); 220 + return ret; 221 + } 215 222 216 223 /* Set up and register clockevents */ 217 224 data->clkevt.name = "pit"; ··· 233 226 data->clkevt.resume = at91sam926x_pit_resume; 234 227 data->clkevt.suspend = at91sam926x_pit_suspend; 235 228 clockevents_register_device(&data->clkevt); 229 + 230 + return 0; 236 231 } 237 232 238 - static void __init at91sam926x_pit_dt_init(struct device_node *node) 233 + static int __init at91sam926x_pit_dt_init(struct device_node *node) 239 234 { 240 235 struct pit_data *data; 241 236 242 237 data = kzalloc(sizeof(*data), GFP_KERNEL); 243 238 if (!data) 244 - panic(pr_fmt("Unable to allocate memory\n")); 239 + return -ENOMEM; 245 240 246 241 data->base = of_iomap(node, 0); 247 - if (!data->base) 248 - panic(pr_fmt("Could not map PIT address\n")); 242 + if (!data->base) { 243 + pr_err("Could not map PIT address\n"); 244 + return -ENXIO; 245 + } 249 246 250 247 data->mck = of_clk_get(node, 0); 251 248 if (IS_ERR(data->mck)) 252 249 /* Fallback on clkdev for !CCF-based boards */ 253 250 data->mck = 
clk_get(NULL, "mck"); 254 251 255 - if (IS_ERR(data->mck)) 256 - panic(pr_fmt("Unable to get mck clk\n")); 252 + if (IS_ERR(data->mck)) { 253 + pr_err("Unable to get mck clk\n"); 254 + return PTR_ERR(data->mck); 255 + } 257 256 258 257 /* Get the interrupts property */ 259 258 data->irq = irq_of_parse_and_map(node, 0); 260 - if (!data->irq) 261 - panic(pr_fmt("Unable to get IRQ from DT\n")); 259 + if (!data->irq) { 260 + pr_err("Unable to get IRQ from DT\n"); 261 + return -EINVAL; 262 + } 262 263 263 - at91sam926x_pit_common_init(data); 264 + return at91sam926x_pit_common_init(data); 264 265 } 265 266 CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", 266 267 at91sam926x_pit_dt_init);
+27 -15
drivers/clocksource/timer-atmel-st.c
··· 194 194 /* 195 195 * ST (system timer) module supports both clockevents and clocksource. 196 196 */ 197 - static void __init atmel_st_timer_init(struct device_node *node) 197 + static int __init atmel_st_timer_init(struct device_node *node) 198 198 { 199 199 struct clk *sclk; 200 200 unsigned int sclk_rate, val; 201 201 int irq, ret; 202 202 203 203 regmap_st = syscon_node_to_regmap(node); 204 - if (IS_ERR(regmap_st)) 205 - panic(pr_fmt("Unable to get regmap\n")); 204 + if (IS_ERR(regmap_st)) { 205 + pr_err("Unable to get regmap\n"); 206 + return PTR_ERR(regmap_st); 207 + } 206 208 207 209 /* Disable all timer interrupts, and clear any pending ones */ 208 210 regmap_write(regmap_st, AT91_ST_IDR, ··· 213 211 214 212 /* Get the interrupts property */ 215 213 irq = irq_of_parse_and_map(node, 0); 216 - if (!irq) 217 - panic(pr_fmt("Unable to get IRQ from DT\n")); 214 + if (!irq) { 215 + pr_err("Unable to get IRQ from DT\n"); 216 + return -EINVAL; 217 + } 218 218 219 219 /* Make IRQs happen for the system timer */ 220 220 ret = request_irq(irq, at91rm9200_timer_interrupt, 221 221 IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, 222 222 "at91_tick", regmap_st); 223 - if (ret) 224 - panic(pr_fmt("Unable to setup IRQ\n")); 223 + if (ret) { 224 + pr_err("Unable to setup IRQ\n"); 225 + return ret; 226 + } 225 227 226 228 sclk = of_clk_get(node, 0); 227 - if (IS_ERR(sclk)) 228 - panic(pr_fmt("Unable to get slow clock\n")); 229 + if (IS_ERR(sclk)) { 230 + pr_err("Unable to get slow clock\n"); 231 + return PTR_ERR(sclk); 232 + } 229 233 230 - clk_prepare_enable(sclk); 231 - if (ret) 232 - panic(pr_fmt("Could not enable slow clock\n")); 234 + ret = clk_prepare_enable(sclk); 235 + if (ret) { 236 + pr_err("Could not enable slow clock\n"); 237 + return ret; 238 + } 233 239 234 240 sclk_rate = clk_get_rate(sclk); 235 - if (!sclk_rate) 236 - panic(pr_fmt("Invalid slow clock rate\n")); 241 + if (!sclk_rate) { 242 + pr_err("Invalid slow clock rate\n"); 243 + return -EINVAL; 244 + } 237 245 
timer_latch = (sclk_rate + HZ / 2) / HZ; 238 246 239 247 /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used ··· 258 246 2, AT91_ST_ALMV); 259 247 260 248 /* register clocksource */ 261 - clocksource_register_hz(&clk32k, sclk_rate); 249 + return clocksource_register_hz(&clk32k, sclk_rate); 262 250 } 263 251 CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st", 264 252 atmel_st_timer_init);
+10 -6
drivers/clocksource/timer-digicolor.c
··· 63 63 int timer_id; /* one of TIMER_* */ 64 64 }; 65 65 66 - struct digicolor_timer *dc_timer(struct clock_event_device *ce) 66 + static struct digicolor_timer *dc_timer(struct clock_event_device *ce) 67 67 { 68 68 return container_of(ce, struct digicolor_timer, ce); 69 69 } ··· 148 148 return ~readl(dc_timer_dev.base + COUNT(TIMER_B)); 149 149 } 150 150 151 - static void __init digicolor_timer_init(struct device_node *node) 151 + static int __init digicolor_timer_init(struct device_node *node) 152 152 { 153 153 unsigned long rate; 154 154 struct clk *clk; ··· 161 161 dc_timer_dev.base = of_iomap(node, 0); 162 162 if (!dc_timer_dev.base) { 163 163 pr_err("Can't map registers"); 164 - return; 164 + return -ENXIO; 165 165 } 166 166 167 167 irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id); 168 168 if (irq <= 0) { 169 169 pr_err("Can't parse IRQ"); 170 - return; 170 + return -EINVAL; 171 171 } 172 172 173 173 clk = of_clk_get(node, 0); 174 174 if (IS_ERR(clk)) { 175 175 pr_err("Can't get timer clock"); 176 - return; 176 + return PTR_ERR(clk); 177 177 } 178 178 clk_prepare_enable(clk); 179 179 rate = clk_get_rate(clk); ··· 190 190 ret = request_irq(irq, digicolor_timer_interrupt, 191 191 IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC", 192 192 &dc_timer_dev.ce); 193 - if (ret) 193 + if (ret) { 194 194 pr_warn("request of timer irq %d failed (%d)\n", irq, ret); 195 + return ret; 196 + } 195 197 196 198 dc_timer_dev.ce.cpumask = cpu_possible_mask; 197 199 dc_timer_dev.ce.irq = irq; 198 200 199 201 clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff); 202 + 203 + return 0; 200 204 } 201 205 CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer", 202 206 digicolor_timer_init);
+33 -18
drivers/clocksource/timer-imx-gpt.c
··· 407 407 .set_next_event = v2_set_next_event, 408 408 }; 409 409 410 - static void __init _mxc_timer_init(struct imx_timer *imxtm) 410 + static int __init _mxc_timer_init(struct imx_timer *imxtm) 411 411 { 412 + int ret; 413 + 412 414 switch (imxtm->type) { 413 415 case GPT_TYPE_IMX1: 414 416 imxtm->gpt = &imx1_gpt_data; ··· 425 423 imxtm->gpt = &imx6dl_gpt_data; 426 424 break; 427 425 default: 428 - BUG(); 426 + return -EINVAL; 429 427 } 430 428 431 429 if (IS_ERR(imxtm->clk_per)) { 432 430 pr_err("i.MX timer: unable to get clk\n"); 433 - return; 431 + return PTR_ERR(imxtm->clk_per); 434 432 } 435 433 436 434 if (!IS_ERR(imxtm->clk_ipg)) ··· 448 446 imxtm->gpt->gpt_setup_tctl(imxtm); 449 447 450 448 /* init and register the timer to the framework */ 451 - mxc_clocksource_init(imxtm); 452 - mxc_clockevent_init(imxtm); 449 + ret = mxc_clocksource_init(imxtm); 450 + if (ret) 451 + return ret; 452 + 453 + return mxc_clockevent_init(imxtm); 453 454 } 454 455 455 456 void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type) ··· 474 469 _mxc_timer_init(imxtm); 475 470 } 476 471 477 - static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type) 472 + static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type) 478 473 { 479 474 struct imx_timer *imxtm; 480 475 static int initialized; 476 + int ret; 481 477 482 478 /* Support one instance only */ 483 479 if (initialized) 484 - return; 480 + return 0; 485 481 486 482 imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL); 487 - BUG_ON(!imxtm); 483 + if (!imxtm) 484 + return -ENOMEM; 488 485 489 486 imxtm->base = of_iomap(np, 0); 490 - WARN_ON(!imxtm->base); 487 + if (!imxtm->base) 488 + return -ENXIO; 489 + 491 490 imxtm->irq = irq_of_parse_and_map(np, 0); 491 + if (imxtm->irq <= 0) 492 + return -EINVAL; 492 493 493 494 imxtm->clk_ipg = of_clk_get_by_name(np, "ipg"); 494 495 ··· 505 494 506 495 imxtm->type = type; 507 496 508 - _mxc_timer_init(imxtm); 497 + ret = 
_mxc_timer_init(imxtm); 498 + if (ret) 499 + return ret; 509 500 510 501 initialized = 1; 502 + 503 + return 0; 511 504 } 512 505 513 - static void __init imx1_timer_init_dt(struct device_node *np) 506 + static int __init imx1_timer_init_dt(struct device_node *np) 514 507 { 515 - mxc_timer_init_dt(np, GPT_TYPE_IMX1); 508 + return mxc_timer_init_dt(np, GPT_TYPE_IMX1); 516 509 } 517 510 518 - static void __init imx21_timer_init_dt(struct device_node *np) 511 + static int __init imx21_timer_init_dt(struct device_node *np) 519 512 { 520 - mxc_timer_init_dt(np, GPT_TYPE_IMX21); 513 + return mxc_timer_init_dt(np, GPT_TYPE_IMX21); 521 514 } 522 515 523 - static void __init imx31_timer_init_dt(struct device_node *np) 516 + static int __init imx31_timer_init_dt(struct device_node *np) 524 517 { 525 518 enum imx_gpt_type type = GPT_TYPE_IMX31; 526 519 ··· 537 522 if (of_machine_is_compatible("fsl,imx6dl")) 538 523 type = GPT_TYPE_IMX6DL; 539 524 540 - mxc_timer_init_dt(np, type); 525 + return mxc_timer_init_dt(np, type); 541 526 } 542 527 543 - static void __init imx6dl_timer_init_dt(struct device_node *np) 528 + static int __init imx6dl_timer_init_dt(struct device_node *np) 544 529 { 545 - mxc_timer_init_dt(np, GPT_TYPE_IMX6DL); 530 + return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL); 546 531 } 547 532 548 533 CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
+37 -20
drivers/clocksource/timer-integrator-ap.c
··· 36 36 return -readl(sched_clk_base + TIMER_VALUE); 37 37 } 38 38 39 - static void integrator_clocksource_init(unsigned long inrate, 40 - void __iomem *base) 39 + static int integrator_clocksource_init(unsigned long inrate, 40 + void __iomem *base) 41 41 { 42 42 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; 43 43 unsigned long rate = inrate; 44 + int ret; 44 45 45 46 if (rate >= 1500000) { 46 47 rate /= 16; ··· 51 50 writel(0xffff, base + TIMER_LOAD); 52 51 writel(ctrl, base + TIMER_CTRL); 53 52 54 - clocksource_mmio_init(base + TIMER_VALUE, "timer2", 55 - rate, 200, 16, clocksource_mmio_readl_down); 53 + ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2", 54 + rate, 200, 16, clocksource_mmio_readl_down); 55 + if (ret) 56 + return ret; 56 57 57 58 sched_clk_base = base; 58 59 sched_clock_register(integrator_read_sched_clock, 16, rate); 60 + 61 + return 0; 59 62 } 60 63 61 64 static unsigned long timer_reload; ··· 143 138 .dev_id = &integrator_clockevent, 144 139 }; 145 140 146 - static void integrator_clockevent_init(unsigned long inrate, 147 - void __iomem *base, int irq) 141 + static int integrator_clockevent_init(unsigned long inrate, 142 + void __iomem *base, int irq) 148 143 { 149 144 unsigned long rate = inrate; 150 145 unsigned int ctrl = 0; 146 + int ret; 151 147 152 148 clkevt_base = base; 153 149 /* Calculate and program a divisor */ ··· 162 156 timer_reload = rate / HZ; 163 157 writel(ctrl, clkevt_base + TIMER_CTRL); 164 158 165 - setup_irq(irq, &integrator_timer_irq); 159 + ret = setup_irq(irq, &integrator_timer_irq); 160 + if (ret) 161 + return ret; 162 + 166 163 clockevents_config_and_register(&integrator_clockevent, 167 164 rate, 168 165 1, 169 166 0xffffU); 167 + return 0; 170 168 } 171 169 172 - static void __init integrator_ap_timer_init_of(struct device_node *node) 170 + static int __init integrator_ap_timer_init_of(struct device_node *node) 173 171 { 174 172 const char *path; 175 173 void __iomem *base; ··· 186 176 187 177 base = 
of_io_request_and_map(node, 0, "integrator-timer"); 188 178 if (IS_ERR(base)) 189 - return; 179 + return PTR_ERR(base); 190 180 191 181 clk = of_clk_get(node, 0); 192 182 if (IS_ERR(clk)) { 193 183 pr_err("No clock for %s\n", node->name); 194 - return; 184 + return PTR_ERR(clk); 195 185 } 196 186 clk_prepare_enable(clk); 197 187 rate = clk_get_rate(clk); ··· 199 189 200 190 err = of_property_read_string(of_aliases, 201 191 "arm,timer-primary", &path); 202 - if (WARN_ON(err)) 203 - return; 192 + if (err) { 193 + pr_warn("Failed to read property"); 194 + return err; 195 + } 196 + 204 197 pri_node = of_find_node_by_path(path); 198 + 205 199 err = of_property_read_string(of_aliases, 206 200 "arm,timer-secondary", &path); 207 - if (WARN_ON(err)) 208 - return; 201 + if (err) { 202 + pr_warn("Failed to read property"); 203 + return err; 204 + } 205 + 206 + 209 207 sec_node = of_find_node_by_path(path); 210 208 211 - if (node == pri_node) { 209 + if (node == pri_node) 212 210 /* The primary timer lacks IRQ, use as clocksource */ 213 - integrator_clocksource_init(rate, base); 214 - return; 215 - } 211 + return integrator_clocksource_init(rate, base); 216 212 217 213 if (node == sec_node) { 218 214 /* The secondary timer will drive the clock event */ 219 215 irq = irq_of_parse_and_map(node, 0); 220 - integrator_clockevent_init(rate, base, irq); 221 - return; 216 + return integrator_clockevent_init(rate, base, irq); 222 217 } 223 218 224 219 pr_info("Timer @%p unused\n", base); 225 220 clk_disable_unprepare(clk); 221 + 222 + return 0; 226 223 } 227 224 228 225 CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
+7 -6
drivers/clocksource/timer-keystone.c
··· 144 144 return 0; 145 145 } 146 146 147 - static void __init keystone_timer_init(struct device_node *np) 147 + static int __init keystone_timer_init(struct device_node *np) 148 148 { 149 149 struct clock_event_device *event_dev = &timer.event_dev; 150 150 unsigned long rate; ··· 154 154 irq = irq_of_parse_and_map(np, 0); 155 155 if (!irq) { 156 156 pr_err("%s: failed to map interrupts\n", __func__); 157 - return; 157 + return -EINVAL; 158 158 } 159 159 160 160 timer.base = of_iomap(np, 0); 161 161 if (!timer.base) { 162 162 pr_err("%s: failed to map registers\n", __func__); 163 - return; 163 + return -ENXIO; 164 164 } 165 165 166 166 clk = of_clk_get(np, 0); 167 167 if (IS_ERR(clk)) { 168 168 pr_err("%s: failed to get clock\n", __func__); 169 169 iounmap(timer.base); 170 - return; 170 + return PTR_ERR(clk); 171 171 } 172 172 173 173 error = clk_prepare_enable(clk); ··· 219 219 clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); 220 220 221 221 pr_info("keystone timer clock @%lu Hz\n", rate); 222 - return; 222 + return 0; 223 223 err: 224 224 clk_put(clk); 225 225 iounmap(timer.base); 226 + return error; 226 227 } 227 228 228 229 CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer", 229 - keystone_timer_init); 230 + keystone_timer_init);
+8 -6
drivers/clocksource/timer-nps.c
··· 55 55 return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); 56 56 } 57 57 58 - static void __init nps_setup_clocksource(struct device_node *node, 59 - struct clk *clk) 58 + static int __init nps_setup_clocksource(struct device_node *node, 59 + struct clk *clk) 60 60 { 61 61 int ret, cluster; 62 62 ··· 68 68 ret = clk_prepare_enable(clk); 69 69 if (ret) { 70 70 pr_err("Couldn't enable parent clock\n"); 71 - return; 71 + return ret; 72 72 } 73 73 74 74 nps_timer_rate = clk_get_rate(clk); ··· 79 79 pr_err("Couldn't register clock source.\n"); 80 80 clk_disable_unprepare(clk); 81 81 } 82 + 83 + return ret; 82 84 } 83 85 84 - static void __init nps_timer_init(struct device_node *node) 86 + static int __init nps_timer_init(struct device_node *node) 85 87 { 86 88 struct clk *clk; 87 89 88 90 clk = of_clk_get(node, 0); 89 91 if (IS_ERR(clk)) { 90 92 pr_err("Can't get timer clock.\n"); 91 - return; 93 + return PTR_ERR(clk); 92 94 } 93 95 94 - nps_setup_clocksource(node, clk); 96 + return nps_setup_clocksource(node, clk); 95 97 } 96 98 97 99 CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+297
drivers/clocksource/timer-oxnas-rps.c
··· 1 + /* 2 + * drivers/clocksource/timer-oxnas-rps.c 3 + * 4 + * Copyright (C) 2009 Oxford Semiconductor Ltd 5 + * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com> 6 + * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 + */ 20 + 21 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22 + 23 + #include <linux/init.h> 24 + #include <linux/irq.h> 25 + #include <linux/io.h> 26 + #include <linux/clk.h> 27 + #include <linux/slab.h> 28 + #include <linux/interrupt.h> 29 + #include <linux/of_irq.h> 30 + #include <linux/of_address.h> 31 + #include <linux/clockchips.h> 32 + #include <linux/sched_clock.h> 33 + 34 + /* TIMER1 used as tick 35 + * TIMER2 used as clocksource 36 + */ 37 + 38 + /* Registers definitions */ 39 + 40 + #define TIMER_LOAD_REG 0x0 41 + #define TIMER_CURR_REG 0x4 42 + #define TIMER_CTRL_REG 0x8 43 + #define TIMER_CLRINT_REG 0xC 44 + 45 + #define TIMER_BITS 24 46 + 47 + #define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1) 48 + 49 + #define TIMER_PERIODIC BIT(6) 50 + #define TIMER_ENABLE BIT(7) 51 + 52 + #define TIMER_DIV1 (0) 53 + #define TIMER_DIV16 (1 << 2) 54 + #define TIMER_DIV256 (2 << 2) 55 + 56 + #define TIMER1_REG_OFFSET 0 57 + #define TIMER2_REG_OFFSET 0x20 58 + 59 + /* Clockevent & Clocksource data */ 60 + 61 + struct oxnas_rps_timer { 62 + struct clock_event_device clkevent; 63 + void __iomem *clksrc_base; 64 + void 
__iomem *clkevt_base; 65 + unsigned long timer_period; 66 + unsigned int timer_prescaler; 67 + struct clk *clk; 68 + int irq; 69 + }; 70 + 71 + static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id) 72 + { 73 + struct oxnas_rps_timer *rps = dev_id; 74 + 75 + writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG); 76 + 77 + rps->clkevent.event_handler(&rps->clkevent); 78 + 79 + return IRQ_HANDLED; 80 + } 81 + 82 + static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps, 83 + unsigned long period, 84 + unsigned int periodic) 85 + { 86 + uint32_t cfg = rps->timer_prescaler; 87 + 88 + if (period) 89 + cfg |= TIMER_ENABLE; 90 + 91 + if (periodic) 92 + cfg |= TIMER_PERIODIC; 93 + 94 + writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG); 95 + writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG); 96 + } 97 + 98 + static int oxnas_rps_timer_shutdown(struct clock_event_device *evt) 99 + { 100 + struct oxnas_rps_timer *rps = 101 + container_of(evt, struct oxnas_rps_timer, clkevent); 102 + 103 + oxnas_rps_timer_config(rps, 0, 0); 104 + 105 + return 0; 106 + } 107 + 108 + static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt) 109 + { 110 + struct oxnas_rps_timer *rps = 111 + container_of(evt, struct oxnas_rps_timer, clkevent); 112 + 113 + oxnas_rps_timer_config(rps, rps->timer_period, 1); 114 + 115 + return 0; 116 + } 117 + 118 + static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt) 119 + { 120 + struct oxnas_rps_timer *rps = 121 + container_of(evt, struct oxnas_rps_timer, clkevent); 122 + 123 + oxnas_rps_timer_config(rps, rps->timer_period, 0); 124 + 125 + return 0; 126 + } 127 + 128 + static int oxnas_rps_timer_next_event(unsigned long delta, 129 + struct clock_event_device *evt) 130 + { 131 + struct oxnas_rps_timer *rps = 132 + container_of(evt, struct oxnas_rps_timer, clkevent); 133 + 134 + oxnas_rps_timer_config(rps, delta, 0); 135 + 136 + return 0; 137 + } 138 + 139 + static int __init 
oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps) 140 + { 141 + ulong clk_rate = clk_get_rate(rps->clk); 142 + ulong timer_rate; 143 + 144 + /* Start with prescaler 1 */ 145 + rps->timer_prescaler = TIMER_DIV1; 146 + rps->timer_period = DIV_ROUND_UP(clk_rate, HZ); 147 + timer_rate = clk_rate; 148 + 149 + if (rps->timer_period > TIMER_MAX_VAL) { 150 + rps->timer_prescaler = TIMER_DIV16; 151 + timer_rate = clk_rate / 16; 152 + rps->timer_period = DIV_ROUND_UP(timer_rate, HZ); 153 + } 154 + if (rps->timer_period > TIMER_MAX_VAL) { 155 + rps->timer_prescaler = TIMER_DIV256; 156 + timer_rate = clk_rate / 256; 157 + rps->timer_period = DIV_ROUND_UP(timer_rate, HZ); 158 + } 159 + 160 + rps->clkevent.name = "oxnas-rps"; 161 + rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC | 162 + CLOCK_EVT_FEAT_ONESHOT | 163 + CLOCK_EVT_FEAT_DYNIRQ; 164 + rps->clkevent.tick_resume = oxnas_rps_timer_shutdown; 165 + rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown; 166 + rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic; 167 + rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot; 168 + rps->clkevent.set_next_event = oxnas_rps_timer_next_event; 169 + rps->clkevent.rating = 200; 170 + rps->clkevent.cpumask = cpu_possible_mask; 171 + rps->clkevent.irq = rps->irq; 172 + clockevents_config_and_register(&rps->clkevent, 173 + timer_rate, 174 + 1, 175 + TIMER_MAX_VAL); 176 + 177 + pr_info("Registered clock event rate %luHz prescaler %x period %lu\n", 178 + clk_rate, 179 + rps->timer_prescaler, 180 + rps->timer_period); 181 + 182 + return 0; 183 + } 184 + 185 + /* Clocksource */ 186 + 187 + static void __iomem *timer_sched_base; 188 + 189 + static u64 notrace oxnas_rps_read_sched_clock(void) 190 + { 191 + return ~readl_relaxed(timer_sched_base); 192 + } 193 + 194 + static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps) 195 + { 196 + ulong clk_rate = clk_get_rate(rps->clk); 197 + int ret; 198 + 199 + /* use prescale 16 */ 200 + clk_rate = 
clk_rate / 16; 201 + 202 + writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG); 203 + writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16, 204 + rps->clksrc_base + TIMER_CTRL_REG); 205 + 206 + timer_sched_base = rps->clksrc_base + TIMER_CURR_REG; 207 + sched_clock_register(oxnas_rps_read_sched_clock, 208 + TIMER_BITS, clk_rate); 209 + ret = clocksource_mmio_init(timer_sched_base, 210 + "oxnas_rps_clocksource_timer", 211 + clk_rate, 250, TIMER_BITS, 212 + clocksource_mmio_readl_down); 213 + if (WARN_ON(ret)) { 214 + pr_err("can't register clocksource\n"); 215 + return ret; 216 + } 217 + 218 + pr_info("Registered clocksource rate %luHz\n", clk_rate); 219 + 220 + return 0; 221 + } 222 + 223 + static int __init oxnas_rps_timer_init(struct device_node *np) 224 + { 225 + struct oxnas_rps_timer *rps; 226 + void __iomem *base; 227 + int ret; 228 + 229 + rps = kzalloc(sizeof(*rps), GFP_KERNEL); 230 + if (!rps) 231 + return -ENOMEM; 232 + 233 + rps->clk = of_clk_get(np, 0); 234 + if (IS_ERR(rps->clk)) { 235 + ret = PTR_ERR(rps->clk); 236 + goto err_alloc; 237 + } 238 + 239 + ret = clk_prepare_enable(rps->clk); 240 + if (ret) 241 + goto err_clk; 242 + 243 + base = of_iomap(np, 0); 244 + if (!base) { 245 + ret = -ENXIO; 246 + goto err_clk_prepare; 247 + } 248 + 249 + rps->irq = irq_of_parse_and_map(np, 0); 250 + if (rps->irq < 0) { 251 + ret = -EINVAL; 252 + goto err_iomap; 253 + } 254 + 255 + rps->clkevt_base = base + TIMER1_REG_OFFSET; 256 + rps->clksrc_base = base + TIMER2_REG_OFFSET; 257 + 258 + /* Disable timers */ 259 + writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG); 260 + writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG); 261 + writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG); 262 + writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG); 263 + writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG); 264 + writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG); 265 + 266 + ret = request_irq(rps->irq, oxnas_rps_timer_irq, 267 + IRQF_TIMER | 
IRQF_IRQPOLL, 268 + "rps-timer", rps); 269 + if (ret) 270 + goto err_iomap; 271 + 272 + ret = oxnas_rps_clocksource_init(rps); 273 + if (ret) 274 + goto err_irqreq; 275 + 276 + ret = oxnas_rps_clockevent_init(rps); 277 + if (ret) 278 + goto err_irqreq; 279 + 280 + return 0; 281 + 282 + err_irqreq: 283 + free_irq(rps->irq, rps); 284 + err_iomap: 285 + iounmap(base); 286 + err_clk_prepare: 287 + clk_disable_unprepare(rps->clk); 288 + err_clk: 289 + clk_put(rps->clk); 290 + err_alloc: 291 + kfree(rps); 292 + 293 + return ret; 294 + } 295 + 296 + CLOCKSOURCE_OF_DECLARE(ox810se_rps, 297 + "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
+31 -11
drivers/clocksource/timer-prima2.c
··· 19 19 #include <linux/of_irq.h> 20 20 #include <linux/of_address.h> 21 21 #include <linux/sched_clock.h> 22 - #include <asm/mach/time.h> 23 22 24 23 #define PRIMA2_CLOCK_FREQ 1000000 25 24 ··· 188 189 } 189 190 190 191 /* initialize the kernel jiffy timer source */ 191 - static void __init sirfsoc_prima2_timer_init(struct device_node *np) 192 + static int __init sirfsoc_prima2_timer_init(struct device_node *np) 192 193 { 193 194 unsigned long rate; 194 195 struct clk *clk; 196 + int ret; 195 197 196 198 clk = of_clk_get(np, 0); 197 - BUG_ON(IS_ERR(clk)); 199 + if (IS_ERR(clk)) { 200 + pr_err("Failed to get clock"); 201 + return PTR_ERR(clk); 202 + } 198 203 199 - BUG_ON(clk_prepare_enable(clk)); 204 + ret = clk_prepare_enable(clk); 205 + if (ret) { 206 + pr_err("Failed to enable clock"); 207 + return ret; 208 + } 200 209 201 210 rate = clk_get_rate(clk); 202 211 203 - BUG_ON(rate < PRIMA2_CLOCK_FREQ); 204 - BUG_ON(rate % PRIMA2_CLOCK_FREQ); 212 + if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) { 213 + pr_err("Invalid clock rate"); 214 + return -EINVAL; 215 + } 205 216 206 217 sirfsoc_timer_base = of_iomap(np, 0); 207 - if (!sirfsoc_timer_base) 208 - panic("unable to map timer cpu registers\n"); 218 + if (!sirfsoc_timer_base) { 219 + pr_err("unable to map timer cpu registers\n"); 220 + return -ENXIO; 221 + } 209 222 210 223 sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 211 224 ··· 227 216 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); 228 217 writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); 229 218 230 - BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, 231 - PRIMA2_CLOCK_FREQ)); 219 + ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ); 220 + if (ret) { 221 + pr_err("Failed to register clocksource"); 222 + return ret; 223 + } 232 224 233 225 sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ); 234 226 235 - BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); 
227 + ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); 228 + if (ret) { 229 + pr_err("Failed to setup irq"); 230 + return ret; 231 + } 236 232 237 233 sirfsoc_clockevent_init(); 234 + 235 + return 0; 238 236 } 239 237 CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, 240 238 "sirf,prima2-tick", sirfsoc_prima2_timer_init);
+57 -29
drivers/clocksource/timer-sp804.c
··· 77 77 writel(0, base + TIMER_CTRL); 78 78 } 79 79 80 - void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, 80 + int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, 81 81 const char *name, 82 82 struct clk *clk, 83 83 int use_sched_clock) ··· 89 89 if (IS_ERR(clk)) { 90 90 pr_err("sp804: clock not found: %d\n", 91 91 (int)PTR_ERR(clk)); 92 - return; 92 + return PTR_ERR(clk); 93 93 } 94 94 } 95 95 96 96 rate = sp804_get_clock_rate(clk); 97 - 98 97 if (rate < 0) 99 - return; 98 + return -EINVAL; 100 99 101 100 /* setup timer 0 as free-running clocksource */ 102 101 writel(0, base + TIMER_CTRL); ··· 111 112 sched_clock_base = base; 112 113 sched_clock_register(sp804_read, 32, rate); 113 114 } 115 + 116 + return 0; 114 117 } 115 118 116 119 ··· 187 186 .dev_id = &sp804_clockevent, 188 187 }; 189 188 190 - void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) 189 + int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) 191 190 { 192 191 struct clock_event_device *evt = &sp804_clockevent; 193 192 long rate; ··· 197 196 if (IS_ERR(clk)) { 198 197 pr_err("sp804: %s clock not found: %d\n", name, 199 198 (int)PTR_ERR(clk)); 200 - return; 199 + return PTR_ERR(clk); 201 200 } 202 201 203 202 rate = sp804_get_clock_rate(clk); 204 203 if (rate < 0) 205 - return; 204 + return -EINVAL; 206 205 207 206 clkevt_base = base; 208 207 clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); ··· 214 213 215 214 setup_irq(irq, &sp804_timer_irq); 216 215 clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); 216 + 217 + return 0; 217 218 } 218 219 219 - static void __init sp804_of_init(struct device_node *np) 220 + static int __init sp804_of_init(struct device_node *np) 220 221 { 221 222 static bool initialized = false; 222 223 void __iomem *base; 223 - int irq; 224 + int irq, ret = -EINVAL; 224 225 u32 irq_num = 0; 225 226 struct clk *clk1, 
*clk2; 226 227 const char *name = of_get_property(np, "compatible", NULL); 227 228 228 229 base = of_iomap(np, 0); 229 - if (WARN_ON(!base)) 230 - return; 230 + if (!base) 231 + return -ENXIO; 231 232 232 233 /* Ensure timers are disabled */ 233 234 writel(0, base + TIMER_CTRL); 234 235 writel(0, base + TIMER_2_BASE + TIMER_CTRL); 235 236 236 - if (initialized || !of_device_is_available(np)) 237 + if (initialized || !of_device_is_available(np)) { 238 + ret = -EINVAL; 237 239 goto err; 240 + } 238 241 239 242 clk1 = of_clk_get(np, 0); 240 243 if (IS_ERR(clk1)) ··· 261 256 262 257 of_property_read_u32(np, "arm,sp804-has-irq", &irq_num); 263 258 if (irq_num == 2) { 264 - __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name); 265 - __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1); 259 + 260 + ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name); 261 + if (ret) 262 + goto err; 263 + 264 + ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1); 265 + if (ret) 266 + goto err; 266 267 } else { 267 - __sp804_clockevents_init(base, irq, clk1 , name); 268 - __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE, 269 - name, clk2, 1); 268 + 269 + ret = __sp804_clockevents_init(base, irq, clk1 , name); 270 + if (ret) 271 + goto err; 272 + 273 + ret =__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE, 274 + name, clk2, 1); 275 + if (ret) 276 + goto err; 270 277 } 271 278 initialized = true; 272 279 273 - return; 280 + return 0; 274 281 err: 275 282 iounmap(base); 283 + return ret; 276 284 } 277 285 CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init); 278 286 279 - static void __init integrator_cp_of_init(struct device_node *np) 287 + static int __init integrator_cp_of_init(struct device_node *np) 280 288 { 281 289 static int init_count = 0; 282 290 void __iomem *base; 283 - int irq; 291 + int irq, ret = -EINVAL; 284 292 const char *name = of_get_property(np, "compatible", NULL); 285 293 struct clk *clk; 286 
294 287 295 base = of_iomap(np, 0); 288 - if (WARN_ON(!base)) 289 - return; 296 + if (!base) { 297 + pr_err("Failed to iomap"); 298 + return -ENXIO; 299 + } 300 + 290 301 clk = of_clk_get(np, 0); 291 - if (WARN_ON(IS_ERR(clk))) 292 - return; 302 + if (IS_ERR(clk)) { 303 + pr_err("Failed to get clock"); 304 + return PTR_ERR(clk); 305 + } 293 306 294 307 /* Ensure timer is disabled */ 295 308 writel(0, base + TIMER_CTRL); ··· 315 292 if (init_count == 2 || !of_device_is_available(np)) 316 293 goto err; 317 294 318 - if (!init_count) 319 - __sp804_clocksource_and_sched_clock_init(base, name, clk, 0); 320 - else { 295 + if (!init_count) { 296 + ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0); 297 + if (ret) 298 + goto err; 299 + } else { 321 300 irq = irq_of_parse_and_map(np, 0); 322 301 if (irq <= 0) 323 302 goto err; 324 303 325 - __sp804_clockevents_init(base, irq, clk, name); 304 + ret = __sp804_clockevents_init(base, irq, clk, name); 305 + if (ret) 306 + goto err; 326 307 } 327 308 328 309 init_count++; 329 - return; 310 + return 0; 330 311 err: 331 312 iounmap(base); 313 + return ret; 332 314 } 333 315 CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
+5 -3
drivers/clocksource/timer-stm32.c
··· 98 98 }, 99 99 }; 100 100 101 - static void __init stm32_clockevent_init(struct device_node *np) 101 + static int __init stm32_clockevent_init(struct device_node *np) 102 102 { 103 103 struct stm32_clock_event_ddata *data = &clock_event_ddata; 104 104 struct clk *clk; ··· 130 130 131 131 data->base = of_iomap(np, 0); 132 132 if (!data->base) { 133 + ret = -ENXIO; 133 134 pr_err("failed to map registers for clockevent\n"); 134 135 goto err_iomap; 135 136 } 136 137 137 138 irq = irq_of_parse_and_map(np, 0); 138 139 if (!irq) { 140 + ret = -EINVAL; 139 141 pr_err("%s: failed to get irq.\n", np->full_name); 140 142 goto err_get_irq; 141 143 } ··· 175 173 pr_info("%s: STM32 clockevent driver initialized (%d bits)\n", 176 174 np->full_name, bits); 177 175 178 - return; 176 + return ret; 179 177 180 178 err_get_irq: 181 179 iounmap(data->base); ··· 184 182 err_clk_enable: 185 183 clk_put(clk); 186 184 err_clk_get: 187 - return; 185 + return ret; 188 186 } 189 187 190 188 CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+21 -12
drivers/clocksource/timer-sun5i.c
··· 311 311 return ret; 312 312 } 313 313 314 - static void __init sun5i_timer_init(struct device_node *node) 314 + static int __init sun5i_timer_init(struct device_node *node) 315 315 { 316 316 struct reset_control *rstc; 317 317 void __iomem *timer_base; 318 318 struct clk *clk; 319 - int irq; 319 + int irq, ret; 320 320 321 321 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); 322 - if (IS_ERR(timer_base)) 323 - panic("Can't map registers"); 322 + if (IS_ERR(timer_base)) { 323 + pr_err("Can't map registers"); 324 + return PTR_ERR(timer_base);; 325 + } 324 326 325 327 irq = irq_of_parse_and_map(node, 0); 326 - if (irq <= 0) 327 - panic("Can't parse IRQ"); 328 + if (irq <= 0) { 329 + pr_err("Can't parse IRQ"); 330 + return -EINVAL; 331 + } 328 332 329 333 clk = of_clk_get(node, 0); 330 - if (IS_ERR(clk)) 331 - panic("Can't get timer clock"); 334 + if (IS_ERR(clk)) { 335 + pr_err("Can't get timer clock"); 336 + return PTR_ERR(clk); 337 + } 332 338 333 339 rstc = of_reset_control_get(node, NULL); 334 340 if (!IS_ERR(rstc)) 335 341 reset_control_deassert(rstc); 336 342 337 - sun5i_setup_clocksource(node, timer_base, clk, irq); 338 - sun5i_setup_clockevent(node, timer_base, clk, irq); 343 + ret = sun5i_setup_clocksource(node, timer_base, clk, irq); 344 + if (ret) 345 + return ret; 346 + 347 + return sun5i_setup_clockevent(node, timer_base, clk, irq); 339 348 } 340 349 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", 341 - sun5i_timer_init); 350 + sun5i_timer_init); 342 351 CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer", 343 - sun5i_timer_init); 352 + sun5i_timer_init);
+5 -3
drivers/clocksource/timer-ti-32k.c
··· 88 88 return ti_32k_read_cycles(&ti_32k_timer.cs); 89 89 } 90 90 91 - static void __init ti_32k_timer_init(struct device_node *np) 91 + static int __init ti_32k_timer_init(struct device_node *np) 92 92 { 93 93 int ret; 94 94 95 95 ti_32k_timer.base = of_iomap(np, 0); 96 96 if (!ti_32k_timer.base) { 97 97 pr_err("Can't ioremap 32k timer base\n"); 98 - return; 98 + return -ENXIO; 99 99 } 100 100 101 101 ti_32k_timer.counter = ti_32k_timer.base; ··· 116 116 ret = clocksource_register_hz(&ti_32k_timer.cs, 32768); 117 117 if (ret) { 118 118 pr_err("32k_counter: can't register clocksource\n"); 119 - return; 119 + return ret; 120 120 } 121 121 122 122 sched_clock_register(omap_32k_read_sched_clock, 32, 32768); 123 123 pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n"); 124 + 125 + return 0; 124 126 } 125 127 CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k", 126 128 ti_32k_timer_init);
+26 -10
drivers/clocksource/timer-u300.c
··· 359 359 /* 360 360 * This sets up the system timers, clock source and clock event. 361 361 */ 362 - static void __init u300_timer_init_of(struct device_node *np) 362 + static int __init u300_timer_init_of(struct device_node *np) 363 363 { 364 364 unsigned int irq; 365 365 struct clk *clk; 366 366 unsigned long rate; 367 + int ret; 367 368 368 369 u300_timer_base = of_iomap(np, 0); 369 - if (!u300_timer_base) 370 - panic("could not ioremap system timer\n"); 370 + if (!u300_timer_base) { 371 + pr_err("could not ioremap system timer\n"); 372 + return -ENXIO; 373 + } 371 374 372 375 /* Get the IRQ for the GP1 timer */ 373 376 irq = irq_of_parse_and_map(np, 2); 374 - if (!irq) 375 - panic("no IRQ for system timer\n"); 377 + if (!irq) { 378 + pr_err("no IRQ for system timer\n"); 379 + return -EINVAL; 380 + } 376 381 377 382 pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq); 378 383 379 384 /* Clock the interrupt controller */ 380 385 clk = of_clk_get(np, 0); 381 - BUG_ON(IS_ERR(clk)); 382 - clk_prepare_enable(clk); 386 + if (IS_ERR(clk)) 387 + return PTR_ERR(clk); 388 + 389 + ret = clk_prepare_enable(clk); 390 + if (ret) 391 + return ret; 392 + 383 393 rate = clk_get_rate(clk); 384 394 385 395 u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); ··· 420 410 u300_timer_base + U300_TIMER_APP_RGPT1); 421 411 422 412 /* Set up the IRQ handler */ 423 - setup_irq(irq, &u300_timer_irq); 413 + ret = setup_irq(irq, &u300_timer_irq); 414 + if (ret) 415 + return ret; 424 416 425 417 /* Reset the General Purpose timer 2 */ 426 418 writel(U300_TIMER_APP_RGPT2_TIMER_RESET, ··· 440 428 u300_timer_base + U300_TIMER_APP_EGPT2); 441 429 442 430 /* Use general purpose timer 2 as clock source */ 443 - if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC, 444 - "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) 431 + ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC, 432 + "GPT2", rate, 300, 32, clocksource_mmio_readl_up); 
433 + if (ret) { 445 434 pr_err("timer: failed to initialize U300 clock source\n"); 435 + return ret; 436 + } 446 437 447 438 /* Configure and register the clockevent */ 448 439 clockevents_config_and_register(&u300_clockevent_data.cevd, rate, ··· 455 440 * TODO: init and register the rest of the timers too, they can be 456 441 * used by hrtimers! 457 442 */ 443 + return 0; 458 444 } 459 445 460 446 CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
+4 -2
drivers/clocksource/versatile.c
··· 25 25 return readl(versatile_sys_24mhz); 26 26 } 27 27 28 - static void __init versatile_sched_clock_init(struct device_node *node) 28 + static int __init versatile_sched_clock_init(struct device_node *node) 29 29 { 30 30 void __iomem *base = of_iomap(node, 0); 31 31 32 32 if (!base) 33 - return; 33 + return -ENXIO; 34 34 35 35 versatile_sys_24mhz = base + SYS_24MHZ; 36 36 37 37 sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); 38 + 39 + return 0; 38 40 } 39 41 CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg", 40 42 versatile_sched_clock_init);
+17 -8
drivers/clocksource/vf_pit_timer.c
··· 156 156 return 0; 157 157 } 158 158 159 - static void __init pit_timer_init(struct device_node *np) 159 + static int __init pit_timer_init(struct device_node *np) 160 160 { 161 161 struct clk *pit_clk; 162 162 void __iomem *timer_base; 163 163 unsigned long clk_rate; 164 - int irq; 164 + int irq, ret; 165 165 166 166 timer_base = of_iomap(np, 0); 167 - BUG_ON(!timer_base); 167 + if (!timer_base) { 168 + pr_err("Failed to iomap"); 169 + return -ENXIO; 170 + } 168 171 169 172 /* 170 173 * PIT0 and PIT1 can be chained to build a 64-bit timer, ··· 178 175 clkevt_base = timer_base + PITn_OFFSET(3); 179 176 180 177 irq = irq_of_parse_and_map(np, 0); 181 - BUG_ON(irq <= 0); 178 + if (irq <= 0) 179 + return -EINVAL; 182 180 183 181 pit_clk = of_clk_get(np, 0); 184 - BUG_ON(IS_ERR(pit_clk)); 182 + if (IS_ERR(pit_clk)) 183 + return PTR_ERR(pit_clk); 185 184 186 - BUG_ON(clk_prepare_enable(pit_clk)); 185 + ret = clk_prepare_enable(pit_clk); 186 + if (ret) 187 + return ret; 187 188 188 189 clk_rate = clk_get_rate(pit_clk); 189 190 cycle_per_jiffy = clk_rate / (HZ); ··· 195 188 /* enable the pit module */ 196 189 __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); 197 190 198 - BUG_ON(pit_clocksource_init(clk_rate)); 191 + ret = pit_clocksource_init(clk_rate); 192 + if (ret) 193 + return ret; 199 194 200 - pit_clockevent_init(clk_rate, irq); 195 + return pit_clockevent_init(clk_rate, irq); 201 196 } 202 197 CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
+17 -7
drivers/clocksource/vt8500_timer.c
··· 121 121 .dev_id = &clockevent, 122 122 }; 123 123 124 - static void __init vt8500_timer_init(struct device_node *np) 124 + static int __init vt8500_timer_init(struct device_node *np) 125 125 { 126 - int timer_irq; 126 + int timer_irq, ret; 127 127 128 128 regbase = of_iomap(np, 0); 129 129 if (!regbase) { 130 130 pr_err("%s: Missing iobase description in Device Tree\n", 131 131 __func__); 132 - return; 132 + return -ENXIO; 133 133 } 134 + 134 135 timer_irq = irq_of_parse_and_map(np, 0); 135 136 if (!timer_irq) { 136 137 pr_err("%s: Missing irq description in Device Tree\n", 137 138 __func__); 138 - return; 139 + return -EINVAL; 139 140 } 140 141 141 142 writel(1, regbase + TIMER_CTRL_VAL); 142 143 writel(0xf, regbase + TIMER_STATUS_VAL); 143 144 writel(~0, regbase + TIMER_MATCH_VAL); 144 145 145 - if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ)) 146 + ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ); 147 + if (ret) { 146 148 pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n", 147 - __func__, clocksource.name); 149 + __func__, clocksource.name); 150 + return ret; 151 + } 148 152 149 153 clockevent.cpumask = cpumask_of(0); 150 154 151 - if (setup_irq(timer_irq, &irq)) 155 + ret = setup_irq(timer_irq, &irq); 156 + if (ret) { 152 157 pr_err("%s: setup_irq failed for %s\n", __func__, 153 158 clockevent.name); 159 + return ret; 160 + } 161 + 154 162 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, 155 163 MIN_OSCR_DELTA * 2, 0xf0000000); 164 + 165 + return 0; 156 166 } 157 167 158 168 CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+2 -2
drivers/clocksource/zevio-timer.c
··· 210 210 return ret; 211 211 } 212 212 213 - static void __init zevio_timer_init(struct device_node *node) 213 + static int __init zevio_timer_init(struct device_node *node) 214 214 { 215 - BUG_ON(zevio_timer_add(node)); 215 + return zevio_timer_add(node); 216 216 } 217 217 218 218 CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
+2 -3
drivers/cpufreq/powernv-cpufreq.c
··· 530 530 else 531 531 timer_interval = GPSTATE_TIMER_INTERVAL; 532 532 533 - mod_timer_pinned(&gpstates->timer, jiffies + 534 - msecs_to_jiffies(timer_interval)); 533 + mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval)); 535 534 } 536 535 537 536 /** ··· 698 699 policy->driver_data = gpstates; 699 700 700 701 /* initialize timer */ 701 - init_timer_deferrable(&gpstates->timer); 702 + init_timer_pinned_deferrable(&gpstates->timer); 702 703 gpstates->timer.data = (unsigned long)policy; 703 704 gpstates->timer.function = gpstate_timer_handler; 704 705 gpstates->timer.expires = jiffies +
-2
drivers/mmc/host/jz4740_mmc.c
··· 1068 1068 jz4740_mmc_clock_disable(host); 1069 1069 setup_timer(&host->timeout_timer, jz4740_mmc_timeout, 1070 1070 (unsigned long)host); 1071 - /* It is not important when it times out, it just needs to timeout. */ 1072 - set_timer_slack(&host->timeout_timer, HZ); 1073 1071 1074 1072 host->use_dma = true; 1075 1073 if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
+2 -2
drivers/net/ethernet/tile/tilepro.c
··· 588 588 static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) 589 589 { 590 590 if (!info->egress_timer_scheduled) { 591 - mod_timer_pinned(&info->egress_timer, jiffies + 1); 591 + mod_timer(&info->egress_timer, jiffies + 1); 592 592 info->egress_timer_scheduled = true; 593 593 } 594 594 } ··· 1004 1004 BUG(); 1005 1005 1006 1006 /* Initialize the egress timer. */ 1007 - init_timer(&info->egress_timer); 1007 + init_timer_pinned(&info->egress_timer); 1008 1008 info->egress_timer.data = (long)info; 1009 1009 info->egress_timer.function = tile_net_handle_egress_timer; 1010 1010
+1 -4
drivers/power/bq27xxx_battery.c
··· 735 735 736 736 bq27xxx_battery_update(di); 737 737 738 - if (poll_interval > 0) { 739 - /* The timer does not have to be accurate. */ 740 - set_timer_slack(&di->work.timer, poll_interval * HZ / 4); 738 + if (poll_interval > 0) 741 739 schedule_delayed_work(&di->work, poll_interval * HZ); 742 - } 743 740 } 744 741 745 742 /*
+2 -2
drivers/tty/metag_da.c
··· 323 323 if (channel >= 0) 324 324 fetch_data(channel); 325 325 326 - mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL); 326 + mod_timer(&poll_timer, jiffies + DA_TTY_POLL); 327 327 } 328 328 329 329 static void add_poll_timer(struct timer_list *poll_timer) 330 330 { 331 - setup_timer(poll_timer, dashtty_timer, 0); 331 + setup_pinned_timer(poll_timer, dashtty_timer, 0); 332 332 poll_timer->expires = jiffies + DA_TTY_POLL; 333 333 334 334 /*
+2 -2
drivers/tty/mips_ejtag_fdc.c
··· 689 689 690 690 mips_ejtag_fdc_handle(priv); 691 691 if (!priv->removing) 692 - mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL); 692 + mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL); 693 693 } 694 694 695 695 /* TTY Port operations */ ··· 1002 1002 raw_spin_unlock_irq(&priv->lock); 1003 1003 } else { 1004 1004 /* If we didn't get an usable IRQ, poll instead */ 1005 - setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer, 1005 + setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer, 1006 1006 (unsigned long)priv); 1007 1007 priv->poll_timer.expires = jiffies + FDC_TTY_POLL; 1008 1008 /*
-1
drivers/usb/host/ohci-hcd.c
··· 500 500 501 501 setup_timer(&ohci->io_watchdog, io_watchdog_func, 502 502 (unsigned long) ohci); 503 - set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20)); 504 503 505 504 ohci->hcca = dma_alloc_coherent (hcd->self.controller, 506 505 sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
-2
drivers/usb/host/xhci.c
··· 490 490 xhci->comp_mode_recovery_timer.expires = jiffies + 491 491 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); 492 492 493 - set_timer_slack(&xhci->comp_mode_recovery_timer, 494 - msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); 495 493 add_timer(&xhci->comp_mode_recovery_timer); 496 494 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 497 495 "Compliance mode recovery timer initialized");
+10
fs/timerfd.c
··· 390 390 clockid != CLOCK_BOOTTIME_ALARM)) 391 391 return -EINVAL; 392 392 393 + if (!capable(CAP_WAKE_ALARM) && 394 + (clockid == CLOCK_REALTIME_ALARM || 395 + clockid == CLOCK_BOOTTIME_ALARM)) 396 + return -EPERM; 397 + 393 398 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 394 399 if (!ctx) 395 400 return -ENOMEM; ··· 437 432 if (ret) 438 433 return ret; 439 434 ctx = f.file->private_data; 435 + 436 + if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { 437 + fdput(f); 438 + return -EPERM; 439 + } 440 440 441 441 timerfd_setup_cancel(ctx, flags); 442 442
+4 -4
include/clocksource/timer-sp804.h
··· 3 3 4 4 struct clk; 5 5 6 - void __sp804_clocksource_and_sched_clock_init(void __iomem *, 7 - const char *, struct clk *, int); 8 - void __sp804_clockevents_init(void __iomem *, unsigned int, 9 - struct clk *, const char *); 6 + int __sp804_clocksource_and_sched_clock_init(void __iomem *, 7 + const char *, struct clk *, int); 8 + int __sp804_clockevents_init(void __iomem *, unsigned int, 9 + struct clk *, const char *); 10 10 void sp804_timer_disable(void __iomem *); 11 11 12 12 static inline void sp804_clocksource_init(void __iomem *base, const char *name)
+3 -3
include/linux/alarmtimer.h
··· 26 26 * struct alarm - Alarm timer structure 27 27 * @node: timerqueue node for adding to the event list this value 28 28 * also includes the expiration time. 29 - * @period: Period for recuring alarms 29 + * @timer: hrtimer used to schedule events while running 30 30 * @function: Function pointer to be executed when the timer fires. 31 - * @type: Alarm type (BOOTTIME/REALTIME) 32 - * @enabled: Flag that represents if the alarm is set to fire or not 31 + * @type: Alarm type (BOOTTIME/REALTIME). 32 + * @state: Flag that represents if the alarm is set to fire or not. 33 33 * @data: Internal data value. 34 34 */ 35 35 struct alarm {
+4
include/linux/clk.h
··· 461 461 return NULL; 462 462 } 463 463 464 + static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) 465 + { 466 + return NULL; 467 + } 464 468 #endif 465 469 466 470 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
+1 -1
include/linux/clocksource.h
··· 244 244 extern int clocksource_i8253_init(void); 245 245 246 246 #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ 247 - OF_DECLARE_1(clksrc, name, compat, fn) 247 + OF_DECLARE_1_RET(clksrc, name, compat, fn) 248 248 249 249 #ifdef CONFIG_CLKSRC_PROBE 250 250 extern void clocksource_probe(void);
+10
include/linux/list.h
··· 679 679 } 680 680 681 681 /* 682 + * Check whether the node is the only node of the head without 683 + * accessing head: 684 + */ 685 + static inline bool 686 + hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) 687 + { 688 + return !n->next && n->pprev == &h->first; 689 + } 690 + 691 + /* 682 692 * Move a list from one list head to another. Fixup the pprev 683 693 * reference of the first entry if it exists. 684 694 */
+3
include/linux/of.h
··· 1009 1009 #endif 1010 1010 1011 1011 typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); 1012 + typedef int (*of_init_fn_1_ret)(struct device_node *); 1012 1013 typedef void (*of_init_fn_1)(struct device_node *); 1013 1014 1014 1015 #define OF_DECLARE_1(table, name, compat, fn) \ 1015 1016 _OF_DECLARE(table, name, compat, fn, of_init_fn_1) 1017 + #define OF_DECLARE_1_RET(table, name, compat, fn) \ 1018 + _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) 1016 1019 #define OF_DECLARE_2(table, name, compat, fn) \ 1017 1020 _OF_DECLARE(table, name, compat, fn, of_init_fn_2) 1018 1021
+14 -1
include/linux/time.h
··· 205 205 int tm_yday; 206 206 }; 207 207 208 - void time_to_tm(time_t totalsecs, int offset, struct tm *result); 208 + void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); 209 + 210 + /** 211 + * time_to_tm - converts the calendar time to local broken-down time 212 + * 213 + * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, 214 + * Coordinated Universal Time (UTC). 215 + * @offset offset seconds adding to totalsecs. 216 + * @result pointer to struct tm variable to receive broken-down time 217 + */ 218 + static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) 219 + { 220 + time64_to_tm(totalsecs, offset, result); 221 + } 209 222 210 223 /** 211 224 * timespec_to_ns - Convert timespec to nanoseconds
+24 -10
include/linux/timer.h
··· 19 19 void (*function)(unsigned long); 20 20 unsigned long data; 21 21 u32 flags; 22 - int slack; 23 22 24 23 #ifdef CONFIG_TIMER_STATS 25 24 int start_pid; ··· 57 58 * workqueue locking issues. It's not meant for executing random crap 58 59 * with interrupts disabled. Abuse is monitored! 59 60 */ 60 - #define TIMER_CPUMASK 0x0007FFFF 61 - #define TIMER_MIGRATING 0x00080000 61 + #define TIMER_CPUMASK 0x0003FFFF 62 + #define TIMER_MIGRATING 0x00040000 62 63 #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) 63 - #define TIMER_DEFERRABLE 0x00100000 64 + #define TIMER_DEFERRABLE 0x00080000 65 + #define TIMER_PINNED 0x00100000 64 66 #define TIMER_IRQSAFE 0x00200000 67 + #define TIMER_ARRAYSHIFT 22 68 + #define TIMER_ARRAYMASK 0xFFC00000 65 69 66 70 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ 67 71 .entry = { .next = TIMER_ENTRY_STATIC }, \ ··· 72 70 .expires = (_expires), \ 73 71 .data = (_data), \ 74 72 .flags = (_flags), \ 75 - .slack = -1, \ 76 73 __TIMER_LOCKDEP_MAP_INITIALIZER( \ 77 74 __FILE__ ":" __stringify(__LINE__)) \ 78 75 } ··· 79 78 #define TIMER_INITIALIZER(_function, _expires, _data) \ 80 79 __TIMER_INITIALIZER((_function), (_expires), (_data), 0) 81 80 81 + #define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ 82 + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) 83 + 82 84 #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ 83 85 __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) 86 + 87 + #define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ 88 + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) 84 89 85 90 #define DEFINE_TIMER(_name, _function, _expires, _data) \ 86 91 struct timer_list _name = \ ··· 131 124 132 125 #define init_timer(timer) \ 133 126 __init_timer((timer), 0) 127 + #define init_timer_pinned(timer) \ 128 + __init_timer((timer), TIMER_PINNED) 134 129 #define init_timer_deferrable(timer) 
\ 135 130 __init_timer((timer), TIMER_DEFERRABLE) 131 + #define init_timer_pinned_deferrable(timer) \ 132 + __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) 136 133 #define init_timer_on_stack(timer) \ 137 134 __init_timer_on_stack((timer), 0) 138 135 ··· 156 145 157 146 #define setup_timer(timer, fn, data) \ 158 147 __setup_timer((timer), (fn), (data), 0) 148 + #define setup_pinned_timer(timer, fn, data) \ 149 + __setup_timer((timer), (fn), (data), TIMER_PINNED) 159 150 #define setup_deferrable_timer(timer, fn, data) \ 160 151 __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) 152 + #define setup_pinned_deferrable_timer(timer, fn, data) \ 153 + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) 161 154 #define setup_timer_on_stack(timer, fn, data) \ 162 155 __setup_timer_on_stack((timer), (fn), (data), 0) 156 + #define setup_pinned_timer_on_stack(timer, fn, data) \ 157 + __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED) 163 158 #define setup_deferrable_timer_on_stack(timer, fn, data) \ 164 159 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) 160 + #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ 161 + __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) 165 162 166 163 /** 167 164 * timer_pending - is a timer pending? ··· 190 171 extern int del_timer(struct timer_list * timer); 191 172 extern int mod_timer(struct timer_list *timer, unsigned long expires); 192 173 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); 193 - extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); 194 174 195 - extern void set_timer_slack(struct timer_list *time, int slack_hz); 196 - 197 - #define TIMER_NOT_PINNED 0 198 - #define TIMER_PINNED 1 199 175 /* 200 176 * The jiffies value which is added to now, when there is no timer 201 177 * in the timer wheel:
+10 -14
kernel/signal.c
··· 2751 2751 * @ts: upper bound on process time suspension 2752 2752 */ 2753 2753 int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2754 - const struct timespec *ts) 2754 + const struct timespec *ts) 2755 2755 { 2756 + ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; 2756 2757 struct task_struct *tsk = current; 2757 - long timeout = MAX_SCHEDULE_TIMEOUT; 2758 2758 sigset_t mask = *which; 2759 - int sig; 2759 + int sig, ret = 0; 2760 2760 2761 2761 if (ts) { 2762 2762 if (!timespec_valid(ts)) 2763 2763 return -EINVAL; 2764 - timeout = timespec_to_jiffies(ts); 2765 - /* 2766 - * We can be close to the next tick, add another one 2767 - * to ensure we will wait at least the time asked for. 2768 - */ 2769 - if (ts->tv_sec || ts->tv_nsec) 2770 - timeout++; 2764 + timeout = timespec_to_ktime(*ts); 2765 + to = &timeout; 2771 2766 } 2772 2767 2773 2768 /* ··· 2773 2778 2774 2779 spin_lock_irq(&tsk->sighand->siglock); 2775 2780 sig = dequeue_signal(tsk, &mask, info); 2776 - if (!sig && timeout) { 2781 + if (!sig && timeout.tv64) { 2777 2782 /* 2778 2783 * None ready, temporarily unblock those we're interested 2779 2784 * while we are sleeping in so that we'll be awakened when ··· 2785 2790 recalc_sigpending(); 2786 2791 spin_unlock_irq(&tsk->sighand->siglock); 2787 2792 2788 - timeout = freezable_schedule_timeout_interruptible(timeout); 2789 - 2793 + __set_current_state(TASK_INTERRUPTIBLE); 2794 + ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 2795 + HRTIMER_MODE_REL); 2790 2796 spin_lock_irq(&tsk->sighand->siglock); 2791 2797 __set_task_blocked(tsk, &tsk->real_blocked); 2792 2798 sigemptyset(&tsk->real_blocked); ··· 2797 2801 2798 2802 if (sig) 2799 2803 return sig; 2800 - return timeout ? -EINTR : -EAGAIN; 2804 + return ret ? -EINTR : -EAGAIN; 2801 2805 } 2802 2806 2803 2807 /**
-1
kernel/time/alarmtimer.c
··· 30 30 * struct alarm_base - Alarm timer bases 31 31 * @lock: Lock for syncrhonized access to the base 32 32 * @timerqueue: Timerqueue head managing the list of events 33 - * @timer: hrtimer used to schedule events while running 34 33 * @gettime: Function to read the time correlating to the base 35 34 * @base_clockid: clockid for the base 36 35 */
+1 -1
kernel/time/clockevents.c
··· 645 645 #endif 646 646 647 647 #ifdef CONFIG_SYSFS 648 - struct bus_type clockevents_subsys = { 648 + static struct bus_type clockevents_subsys = { 649 649 .name = "clockevents", 650 650 .dev_name = "clockevent", 651 651 };
+5 -3
kernel/time/clocksource.c
··· 669 669 struct list_head *entry = &clocksource_list; 670 670 struct clocksource *tmp; 671 671 672 - list_for_each_entry(tmp, &clocksource_list, list) 672 + list_for_each_entry(tmp, &clocksource_list, list) { 673 673 /* Keep track of the place, where to insert */ 674 - if (tmp->rating >= cs->rating) 675 - entry = &tmp->list; 674 + if (tmp->rating < cs->rating) 675 + break; 676 + entry = &tmp->list; 677 + } 676 678 list_add(&cs->list, entry); 677 679 } 678 680
+1 -1
kernel/time/hrtimer.c
··· 177 177 #endif 178 178 } 179 179 180 - #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 180 + #ifdef CONFIG_NO_HZ_COMMON 181 181 static inline 182 182 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, 183 183 int pinned)
+8 -8
kernel/time/test_udelay.c
··· 43 43 int allowed_error_ns = usecs * 5; 44 44 45 45 for (i = 0; i < iters; ++i) { 46 - struct timespec ts1, ts2; 46 + s64 kt1, kt2; 47 47 int time_passed; 48 48 49 - ktime_get_ts(&ts1); 49 + kt1 = ktime_get_ns(); 50 50 udelay(usecs); 51 - ktime_get_ts(&ts2); 52 - time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1); 51 + kt2 = ktime_get_ns(); 52 + time_passed = kt2 - kt1; 53 53 54 54 if (i == 0 || time_passed < min) 55 55 min = time_passed; ··· 87 87 if (usecs > 0 && iters > 0) { 88 88 return udelay_test_single(s, usecs, iters); 89 89 } else if (usecs == 0) { 90 - struct timespec ts; 90 + struct timespec64 ts; 91 91 92 - ktime_get_ts(&ts); 93 - seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n", 94 - loops_per_jiffy, ts.tv_sec, ts.tv_nsec); 92 + ktime_get_ts64(&ts); 93 + seq_printf(s, "udelay() test (lpj=%ld kt=%lld.%09ld)\n", 94 + loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec); 95 95 seq_puts(s, "usage:\n"); 96 96 seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n"); 97 97 seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
+1
kernel/time/tick-broadcast-hrtimer.c
··· 75 75 } 76 76 77 77 static struct clock_event_device ce_broadcast_hrtimer = { 78 + .name = "bc_hrtimer", 78 79 .set_state_shutdown = bc_shutdown, 79 80 .set_next_ktime = bc_set_next, 80 81 .features = CLOCK_EVT_FEAT_ONESHOT |
+1
kernel/time/tick-internal.h
··· 164 164 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); 165 165 166 166 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem); 167 + void timer_clear_idle(void);
+39 -59
kernel/time/tick-sched.c
··· 31 31 #include <trace/events/timer.h> 32 32 33 33 /* 34 - * Per cpu nohz control structure 34 + * Per-CPU nohz control structure 35 35 */ 36 36 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 37 37 ··· 61 61 if (delta.tv64 < tick_period.tv64) 62 62 return; 63 63 64 - /* Reevalute with jiffies_lock held */ 64 + /* Reevaluate with jiffies_lock held */ 65 65 write_seqlock(&jiffies_lock); 66 66 67 67 delta = ktime_sub(now, last_jiffies_update); ··· 116 116 #ifdef CONFIG_NO_HZ_COMMON 117 117 /* 118 118 * Check if the do_timer duty was dropped. We don't care about 119 - * concurrency: This happens only when the cpu in charge went 120 - * into a long sleep. If two cpus happen to assign themself to 119 + * concurrency: This happens only when the CPU in charge went 120 + * into a long sleep. If two CPUs happen to assign themselves to 121 121 * this duty, then the jiffies update is still serialized by 122 122 * jiffies_lock. 123 123 */ ··· 349 349 /* 350 350 * Re-evaluate the need for the tick as we switch the current task. 351 351 * It might need the tick due to per task/process properties: 352 - * perf events, posix cpu timers, ... 352 + * perf events, posix CPU timers, ... 353 353 */ 354 354 void __tick_nohz_task_switch(void) 355 355 { ··· 509 509 * 510 510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies 511 511 * must be updated. Otherwise an interrupt handler could use a stale jiffy 512 - * value. We do this unconditionally on any cpu, as we don't know whether the 513 - * cpu, which has the update task assigned is in a long sleep. 512 + * value. We do this unconditionally on any CPU, as we don't know whether the 513 + * CPU, which has the update task assigned is in a long sleep. 
514 514 */ 515 515 static void tick_nohz_update_jiffies(ktime_t now) 516 516 { ··· 526 526 } 527 527 528 528 /* 529 - * Updates the per cpu time idle statistics counters 529 + * Updates the per-CPU time idle statistics counters 530 530 */ 531 531 static void 532 532 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) ··· 566 566 } 567 567 568 568 /** 569 - * get_cpu_idle_time_us - get the total idle time of a cpu 569 + * get_cpu_idle_time_us - get the total idle time of a CPU 570 570 * @cpu: CPU number to query 571 571 * @last_update_time: variable to store update time in. Do not update 572 572 * counters if NULL. 573 573 * 574 - * Return the cummulative idle time (since boot) for a given 574 + * Return the cumulative idle time (since boot) for a given 575 575 * CPU, in microseconds. 576 576 * 577 577 * This time is measured via accounting rather than sampling, ··· 607 607 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); 608 608 609 609 /** 610 - * get_cpu_iowait_time_us - get the total iowait time of a cpu 610 + * get_cpu_iowait_time_us - get the total iowait time of a CPU 611 611 * @cpu: CPU number to query 612 612 * @last_update_time: variable to store update time in. Do not update 613 613 * counters if NULL. 614 614 * 615 - * Return the cummulative iowait time (since boot) for a given 615 + * Return the cumulative iowait time (since boot) for a given 616 616 * CPU, in microseconds. 617 617 * 618 618 * This time is measured via accounting rather than sampling, ··· 700 700 delta = next_tick - basemono; 701 701 if (delta <= (u64)TICK_NSEC) { 702 702 tick.tv64 = 0; 703 + 704 + /* 705 + * Tell the timer code that the base is not idle, i.e. undo 706 + * the effect of get_next_timer_interrupt(): 707 + */ 708 + timer_clear_idle(); 703 709 /* 704 710 * We've not stopped the tick yet, and there's a timer in the 705 711 * next period, so no point in stopping it either, bail. 
··· 732 726 } 733 727 734 728 /* 735 - * If this cpu is the one which updates jiffies, then give up 736 - * the assignment and let it be taken by the cpu which runs 737 - * the tick timer next, which might be this cpu as well. If we 729 + * If this CPU is the one which updates jiffies, then give up 730 + * the assignment and let it be taken by the CPU which runs 731 + * the tick timer next, which might be this CPU as well. If we 738 732 * don't drop this here the jiffies might be stale and 739 733 * do_timer() never invoked. Keep track of the fact that it 740 - * was the one which had the do_timer() duty last. If this cpu 734 + * was the one which had the do_timer() duty last. If this CPU 741 735 * is the one which had the do_timer() duty last, we limit the 742 - * sleep time to the timekeeping max_deferement value. 736 + * sleep time to the timekeeping max_deferment value. 743 737 * Otherwise we can sleep as long as we want. 744 738 */ 745 739 delta = timekeeping_max_deferment(); ··· 815 809 tick_do_update_jiffies64(now); 816 810 cpu_load_update_nohz_stop(); 817 811 812 + /* 813 + * Clear the timer idle flag, so we avoid IPIs on remote queueing and 814 + * the clock forward checks in the enqueue path: 815 + */ 816 + timer_clear_idle(); 817 + 818 818 calc_load_exit_idle(); 819 819 touch_softlockup_watchdog_sched(); 820 820 /* ··· 853 841 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) 854 842 { 855 843 /* 856 - * If this cpu is offline and it is the one which updates 844 + * If this CPU is offline and it is the one which updates 857 845 * jiffies, then give up the assignment and let it be taken by 858 - * the cpu which runs the tick timer next. If we don't drop 846 + * the CPU which runs the tick timer next. If we don't drop 859 847 * this here the jiffies might be stale and do_timer() never 860 848 * invoked. 
861 849 */ ··· 908 896 ktime_t now, expires; 909 897 int cpu = smp_processor_id(); 910 898 911 - now = tick_nohz_start_idle(ts); 912 - 913 899 if (can_stop_idle_tick(cpu, ts)) { 914 900 int was_stopped = ts->tick_stopped; 915 901 902 + now = tick_nohz_start_idle(ts); 916 903 ts->idle_calls++; 917 904 918 905 expires = tick_nohz_stop_sched_tick(ts, now, cpu); ··· 944 933 WARN_ON_ONCE(irqs_disabled()); 945 934 946 935 /* 947 - * Update the idle state in the scheduler domain hierarchy 948 - * when tick_nohz_stop_sched_tick() is called from the idle loop. 949 - * State will be updated to busy during the first busy tick after 950 - * exiting idle. 951 - */ 936 + * Update the idle state in the scheduler domain hierarchy 937 + * when tick_nohz_stop_sched_tick() is called from the idle loop. 938 + * State will be updated to busy during the first busy tick after 939 + * exiting idle. 940 + */ 952 941 set_cpu_sd_state_idle(); 953 942 954 943 local_irq_disable(); ··· 1103 1092 tick_nohz_activate(ts, NOHZ_MODE_LOWRES); 1104 1093 } 1105 1094 1106 - /* 1107 - * When NOHZ is enabled and the tick is stopped, we need to kick the 1108 - * tick timer from irq_enter() so that the jiffies update is kept 1109 - * alive during long running softirqs. That's ugly as hell, but 1110 - * correctness is key even if we need to fix the offending softirq in 1111 - * the first place. 1112 - * 1113 - * Note, this is different to tick_nohz_restart. We just kick the 1114 - * timer and do not touch the other magic bits which need to be done 1115 - * when idle is left. 1116 - */ 1117 - static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now) 1118 - { 1119 - #if 0 1120 - /* Switch back to 2.6.27 behaviour */ 1121 - ktime_t delta; 1122 - 1123 - /* 1124 - * Do not touch the tick device, when the next expiry is either 1125 - * already reached or less/equal than the tick period. 
1126 - */ 1127 - delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); 1128 - if (delta.tv64 <= tick_period.tv64) 1129 - return; 1130 - 1131 - tick_nohz_restart(ts, now); 1132 - #endif 1133 - } 1134 - 1135 1095 static inline void tick_nohz_irq_enter(void) 1136 1096 { 1137 1097 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); ··· 1113 1131 now = ktime_get(); 1114 1132 if (ts->idle_active) 1115 1133 tick_nohz_stop_idle(ts, now); 1116 - if (ts->tick_stopped) { 1134 + if (ts->tick_stopped) 1117 1135 tick_nohz_update_jiffies(now); 1118 - tick_nohz_kick_tick(ts, now); 1119 - } 1120 1136 } 1121 1137 1122 1138 #else ··· 1191 1211 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1192 1212 ts->sched_timer.function = tick_sched_timer; 1193 1213 1194 - /* Get the next period (per cpu) */ 1214 + /* Get the next period (per-CPU) */ 1195 1215 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 1196 1216 1197 1217 /* Offset the tick to avert jiffies_lock contention. */
+6 -5
kernel/time/timeconv.c
··· 67 67 #define SECS_PER_DAY (SECS_PER_HOUR * 24) 68 68 69 69 /** 70 - * time_to_tm - converts the calendar time to local broken-down time 70 + * time64_to_tm - converts the calendar time to local broken-down time 71 71 * 72 72 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, 73 73 * Coordinated Universal Time (UTC). 74 74 * @offset offset seconds adding to totalsecs. 75 75 * @result pointer to struct tm variable to receive broken-down time 76 76 */ 77 - void time_to_tm(time_t totalsecs, int offset, struct tm *result) 77 + void time64_to_tm(time64_t totalsecs, int offset, struct tm *result) 78 78 { 79 79 long days, rem, y; 80 + int remainder; 80 81 const unsigned short *ip; 81 82 82 - days = totalsecs / SECS_PER_DAY; 83 - rem = totalsecs % SECS_PER_DAY; 83 + days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder); 84 + rem = remainder; 84 85 rem += offset; 85 86 while (rem < 0) { 86 87 rem += SECS_PER_DAY; ··· 125 124 result->tm_mon = y; 126 125 result->tm_mday = days + 1; 127 126 } 128 - EXPORT_SYMBOL(time_to_tm); 127 + EXPORT_SYMBOL(time64_to_tm);
+6 -4
kernel/time/timekeeping.c
··· 480 480 * users are removed, this can be killed. 481 481 */ 482 482 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1); 483 - tk->tkr_mono.xtime_nsec -= remainder; 484 - tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift; 485 - tk->ntp_error += remainder << tk->ntp_error_shift; 486 - tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift; 483 + if (remainder != 0) { 484 + tk->tkr_mono.xtime_nsec -= remainder; 485 + tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift; 486 + tk->ntp_error += remainder << tk->ntp_error_shift; 487 + tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift; 488 + } 487 489 } 488 490 #else 489 491 #define old_vsyscall_fixup(tk)
+672 -443
kernel/time/timer.c
··· 59 59 EXPORT_SYMBOL(jiffies_64); 60 60 61 61 /* 62 - * per-CPU timer vector definitions: 62 + * The timer wheel has LVL_DEPTH array levels. Each level provides an array of 63 + * LVL_SIZE buckets. Each level is driven by its own clock and therefor each 64 + * level has a different granularity. 65 + * 66 + * The level granularity is: LVL_CLK_DIV ^ lvl 67 + * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level) 68 + * 69 + * The array level of a newly armed timer depends on the relative expiry 70 + * time. The farther the expiry time is away the higher the array level and 71 + * therefor the granularity becomes. 72 + * 73 + * Contrary to the original timer wheel implementation, which aims for 'exact' 74 + * expiry of the timers, this implementation removes the need for recascading 75 + * the timers into the lower array levels. The previous 'classic' timer wheel 76 + * implementation of the kernel already violated the 'exact' expiry by adding 77 + * slack to the expiry time to provide batched expiration. The granularity 78 + * levels provide implicit batching. 79 + * 80 + * This is an optimization of the original timer wheel implementation for the 81 + * majority of the timer wheel use cases: timeouts. The vast majority of 82 + * timeout timers (networking, disk I/O ...) are canceled before expiry. If 83 + * the timeout expires it indicates that normal operation is disturbed, so it 84 + * does not matter much whether the timeout comes with a slight delay. 85 + * 86 + * The only exception to this are networking timers with a small expiry 87 + * time. They rely on the granularity. Those fit into the first wheel level, 88 + * which has HZ granularity. 89 + * 90 + * We don't have cascading anymore. timers with a expiry time above the 91 + * capacity of the last wheel level are force expired at the maximum timeout 92 + * value of the last wheel level. 
From data sampling we know that the maximum 93 + * value observed is 5 days (network connection tracking), so this should not 94 + * be an issue. 95 + * 96 + * The currently chosen array constants values are a good compromise between 97 + * array size and granularity. 98 + * 99 + * This results in the following granularity and range levels: 100 + * 101 + * HZ 1000 steps 102 + * Level Offset Granularity Range 103 + * 0 0 1 ms 0 ms - 63 ms 104 + * 1 64 8 ms 64 ms - 511 ms 105 + * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s) 106 + * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s) 107 + * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m) 108 + * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m) 109 + * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h) 110 + * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d) 111 + * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d) 112 + * 113 + * HZ 300 114 + * Level Offset Granularity Range 115 + * 0 0 3 ms 0 ms - 210 ms 116 + * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s) 117 + * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s) 118 + * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m) 119 + * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m) 120 + * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h) 121 + * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h) 122 + * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d) 123 + * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d) 124 + * 125 + * HZ 250 126 + * Level Offset Granularity Range 127 + * 0 0 4 ms 0 ms - 255 ms 128 + * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s) 129 + * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s) 130 + * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m) 131 + * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m) 132 + * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h) 133 + * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h) 134 + * 7 448 
8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d) 135 + * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d) 136 + * 137 + * HZ 100 138 + * Level Offset Granularity Range 139 + * 0 0 10 ms 0 ms - 630 ms 140 + * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s) 141 + * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s) 142 + * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m) 143 + * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m) 144 + * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h) 145 + * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d) 146 + * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d) 63 147 */ 64 - #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) 65 - #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) 66 - #define TVN_SIZE (1 << TVN_BITS) 67 - #define TVR_SIZE (1 << TVR_BITS) 68 - #define TVN_MASK (TVN_SIZE - 1) 69 - #define TVR_MASK (TVR_SIZE - 1) 70 - #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1)) 71 148 72 - struct tvec { 73 - struct hlist_head vec[TVN_SIZE]; 74 - }; 149 + /* Clock divisor for the next level */ 150 + #define LVL_CLK_SHIFT 3 151 + #define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT) 152 + #define LVL_CLK_MASK (LVL_CLK_DIV - 1) 153 + #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT) 154 + #define LVL_GRAN(n) (1UL << LVL_SHIFT(n)) 75 155 76 - struct tvec_root { 77 - struct hlist_head vec[TVR_SIZE]; 78 - }; 156 + /* 157 + * The time start value for each level to select the bucket at enqueue 158 + * time. 
159 + */ 160 + #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT)) 79 161 80 - struct tvec_base { 81 - spinlock_t lock; 82 - struct timer_list *running_timer; 83 - unsigned long timer_jiffies; 84 - unsigned long next_timer; 85 - unsigned long active_timers; 86 - unsigned long all_timers; 87 - int cpu; 88 - bool migration_enabled; 89 - bool nohz_active; 90 - struct tvec_root tv1; 91 - struct tvec tv2; 92 - struct tvec tv3; 93 - struct tvec tv4; 94 - struct tvec tv5; 162 + /* Size of each clock level */ 163 + #define LVL_BITS 6 164 + #define LVL_SIZE (1UL << LVL_BITS) 165 + #define LVL_MASK (LVL_SIZE - 1) 166 + #define LVL_OFFS(n) ((n) * LVL_SIZE) 167 + 168 + /* Level depth */ 169 + #if HZ > 100 170 + # define LVL_DEPTH 9 171 + # else 172 + # define LVL_DEPTH 8 173 + #endif 174 + 175 + /* The cutoff (max. capacity of the wheel) */ 176 + #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH)) 177 + #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1)) 178 + 179 + /* 180 + * The resulting wheel size. If NOHZ is configured we allocate two 181 + * wheels so we have a separate storage for the deferrable timers. 
182 + */ 183 + #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) 184 + 185 + #ifdef CONFIG_NO_HZ_COMMON 186 + # define NR_BASES 2 187 + # define BASE_STD 0 188 + # define BASE_DEF 1 189 + #else 190 + # define NR_BASES 1 191 + # define BASE_STD 0 192 + # define BASE_DEF 0 193 + #endif 194 + 195 + struct timer_base { 196 + spinlock_t lock; 197 + struct timer_list *running_timer; 198 + unsigned long clk; 199 + unsigned long next_expiry; 200 + unsigned int cpu; 201 + bool migration_enabled; 202 + bool nohz_active; 203 + bool is_idle; 204 + DECLARE_BITMAP(pending_map, WHEEL_SIZE); 205 + struct hlist_head vectors[WHEEL_SIZE]; 95 206 } ____cacheline_aligned; 96 207 97 - 98 - static DEFINE_PER_CPU(struct tvec_base, tvec_bases); 208 + static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); 99 209 100 210 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 101 211 unsigned int sysctl_timer_migration = 1; ··· 216 106 unsigned int cpu; 217 107 218 108 /* Avoid the loop, if nothing to update */ 219 - if (this_cpu_read(tvec_bases.migration_enabled) == on) 109 + if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on) 220 110 return; 221 111 222 112 for_each_possible_cpu(cpu) { 223 - per_cpu(tvec_bases.migration_enabled, cpu) = on; 113 + per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on; 114 + per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on; 224 115 per_cpu(hrtimer_bases.migration_enabled, cpu) = on; 225 116 if (!update_nohz) 226 117 continue; 227 - per_cpu(tvec_bases.nohz_active, cpu) = true; 118 + per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true; 119 + per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true; 228 120 per_cpu(hrtimer_bases.nohz_active, cpu) = true; 229 121 } 230 122 } ··· 244 132 timers_update_migration(false); 245 133 mutex_unlock(&mutex); 246 134 return ret; 247 - } 248 - 249 - static inline struct tvec_base *get_target_base(struct tvec_base *base, 250 - int pinned) 251 - { 252 - if (pinned || !base->migration_enabled) 253 
- return this_cpu_ptr(&tvec_bases); 254 - return per_cpu_ptr(&tvec_bases, get_nohz_timer_target()); 255 - } 256 - #else 257 - static inline struct tvec_base *get_target_base(struct tvec_base *base, 258 - int pinned) 259 - { 260 - return this_cpu_ptr(&tvec_bases); 261 135 } 262 136 #endif 263 137 ··· 449 351 } 450 352 EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 451 353 452 - /** 453 - * set_timer_slack - set the allowed slack for a timer 454 - * @timer: the timer to be modified 455 - * @slack_hz: the amount of time (in jiffies) allowed for rounding 456 - * 457 - * Set the amount of time, in jiffies, that a certain timer has 458 - * in terms of slack. By setting this value, the timer subsystem 459 - * will schedule the actual timer somewhere between 460 - * the time mod_timer() asks for, and that time plus the slack. 461 - * 462 - * By setting the slack to -1, a percentage of the delay is used 463 - * instead. 464 - */ 465 - void set_timer_slack(struct timer_list *timer, int slack_hz) 354 + 355 + static inline unsigned int timer_get_idx(struct timer_list *timer) 466 356 { 467 - timer->slack = slack_hz; 357 + return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; 468 358 } 469 - EXPORT_SYMBOL_GPL(set_timer_slack); 359 + 360 + static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) 361 + { 362 + timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | 363 + idx << TIMER_ARRAYSHIFT; 364 + } 365 + 366 + /* 367 + * Helper function to calculate the array index for a given expiry 368 + * time. 
369 + */ 370 + static inline unsigned calc_index(unsigned expires, unsigned lvl) 371 + { 372 + expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl); 373 + return LVL_OFFS(lvl) + (expires & LVL_MASK); 374 + } 375 + 376 + static int calc_wheel_index(unsigned long expires, unsigned long clk) 377 + { 378 + unsigned long delta = expires - clk; 379 + unsigned int idx; 380 + 381 + if (delta < LVL_START(1)) { 382 + idx = calc_index(expires, 0); 383 + } else if (delta < LVL_START(2)) { 384 + idx = calc_index(expires, 1); 385 + } else if (delta < LVL_START(3)) { 386 + idx = calc_index(expires, 2); 387 + } else if (delta < LVL_START(4)) { 388 + idx = calc_index(expires, 3); 389 + } else if (delta < LVL_START(5)) { 390 + idx = calc_index(expires, 4); 391 + } else if (delta < LVL_START(6)) { 392 + idx = calc_index(expires, 5); 393 + } else if (delta < LVL_START(7)) { 394 + idx = calc_index(expires, 6); 395 + } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) { 396 + idx = calc_index(expires, 7); 397 + } else if ((long) delta < 0) { 398 + idx = clk & LVL_MASK; 399 + } else { 400 + /* 401 + * Force expire obscene large timeouts to expire at the 402 + * capacity limit of the wheel. 403 + */ 404 + if (expires >= WHEEL_TIMEOUT_CUTOFF) 405 + expires = WHEEL_TIMEOUT_MAX; 406 + 407 + idx = calc_index(expires, LVL_DEPTH - 1); 408 + } 409 + return idx; 410 + } 411 + 412 + /* 413 + * Enqueue the timer into the hash bucket, mark it pending in 414 + * the bitmap and store the index in the timer flags. 
415 + */ 416 + static void enqueue_timer(struct timer_base *base, struct timer_list *timer, 417 + unsigned int idx) 418 + { 419 + hlist_add_head(&timer->entry, base->vectors + idx); 420 + __set_bit(idx, base->pending_map); 421 + timer_set_idx(timer, idx); 422 + } 470 423 471 424 static void 472 - __internal_add_timer(struct tvec_base *base, struct timer_list *timer) 425 + __internal_add_timer(struct timer_base *base, struct timer_list *timer) 473 426 { 474 - unsigned long expires = timer->expires; 475 - unsigned long idx = expires - base->timer_jiffies; 476 - struct hlist_head *vec; 427 + unsigned int idx; 477 428 478 - if (idx < TVR_SIZE) { 479 - int i = expires & TVR_MASK; 480 - vec = base->tv1.vec + i; 481 - } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { 482 - int i = (expires >> TVR_BITS) & TVN_MASK; 483 - vec = base->tv2.vec + i; 484 - } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { 485 - int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; 486 - vec = base->tv3.vec + i; 487 - } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { 488 - int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; 489 - vec = base->tv4.vec + i; 490 - } else if ((signed long) idx < 0) { 491 - /* 492 - * Can happen if you add a timer with expires == jiffies, 493 - * or you set a timer to go off in the past 494 - */ 495 - vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); 496 - } else { 497 - int i; 498 - /* If the timeout is larger than MAX_TVAL (on 64-bit 499 - * architectures or with CONFIG_BASE_SMALL=1) then we 500 - * use the maximum timeout. 
501 - */ 502 - if (idx > MAX_TVAL) { 503 - idx = MAX_TVAL; 504 - expires = idx + base->timer_jiffies; 505 - } 506 - i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; 507 - vec = base->tv5.vec + i; 508 - } 509 - 510 - hlist_add_head(&timer->entry, vec); 429 + idx = calc_wheel_index(timer->expires, base->clk); 430 + enqueue_timer(base, timer, idx); 511 431 } 512 432 513 - static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) 433 + static void 434 + trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) 514 435 { 515 - /* Advance base->jiffies, if the base is empty */ 516 - if (!base->all_timers++) 517 - base->timer_jiffies = jiffies; 518 - 519 - __internal_add_timer(base, timer); 520 - /* 521 - * Update base->active_timers and base->next_timer 522 - */ 523 - if (!(timer->flags & TIMER_DEFERRABLE)) { 524 - if (!base->active_timers++ || 525 - time_before(timer->expires, base->next_timer)) 526 - base->next_timer = timer->expires; 527 - } 436 + if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) 437 + return; 528 438 529 439 /* 530 - * Check whether the other CPU is in dynticks mode and needs 531 - * to be triggered to reevaluate the timer wheel. 532 - * We are protected against the other CPU fiddling 533 - * with the timer by holding the timer base lock. This also 534 - * makes sure that a CPU on the way to stop its tick can not 535 - * evaluate the timer wheel. 536 - * 537 - * Spare the IPI for deferrable timers on idle targets though. 538 - * The next busy ticks will take care of it. Except full dynticks 539 - * require special care against races with idle_cpu(), lets deal 540 - * with that later. 440 + * TODO: This wants some optimizing similar to the code below, but we 441 + * will do that when we switch from push to pull for deferrable timers. 
541 442 */ 542 - if (base->nohz_active) { 543 - if (!(timer->flags & TIMER_DEFERRABLE) || 544 - tick_nohz_full_cpu(base->cpu)) 443 + if (timer->flags & TIMER_DEFERRABLE) { 444 + if (tick_nohz_full_cpu(base->cpu)) 545 445 wake_up_nohz_cpu(base->cpu); 446 + return; 546 447 } 448 + 449 + /* 450 + * We might have to IPI the remote CPU if the base is idle and the 451 + * timer is not deferrable. If the other CPU is on the way to idle 452 + * then it can't set base->is_idle as we hold the base lock: 453 + */ 454 + if (!base->is_idle) 455 + return; 456 + 457 + /* Check whether this is the new first expiring timer: */ 458 + if (time_after_eq(timer->expires, base->next_expiry)) 459 + return; 460 + 461 + /* 462 + * Set the next expiry time and kick the CPU so it can reevaluate the 463 + * wheel: 464 + */ 465 + base->next_expiry = timer->expires; 466 + wake_up_nohz_cpu(base->cpu); 467 + } 468 + 469 + static void 470 + internal_add_timer(struct timer_base *base, struct timer_list *timer) 471 + { 472 + __internal_add_timer(base, timer); 473 + trigger_dyntick_cpu(base, timer); 547 474 } 548 475 549 476 #ifdef CONFIG_TIMER_STATS ··· 789 666 { 790 667 timer->entry.pprev = NULL; 791 668 timer->flags = flags | raw_smp_processor_id(); 792 - timer->slack = -1; 793 669 #ifdef CONFIG_TIMER_STATS 794 670 timer->start_site = NULL; 795 671 timer->start_pid = -1; ··· 828 706 entry->next = LIST_POISON2; 829 707 } 830 708 831 - static inline void 832 - detach_expired_timer(struct timer_list *timer, struct tvec_base *base) 833 - { 834 - detach_timer(timer, true); 835 - if (!(timer->flags & TIMER_DEFERRABLE)) 836 - base->active_timers--; 837 - base->all_timers--; 838 - } 839 - 840 - static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, 709 + static int detach_if_pending(struct timer_list *timer, struct timer_base *base, 841 710 bool clear_pending) 842 711 { 712 + unsigned idx = timer_get_idx(timer); 713 + 843 714 if (!timer_pending(timer)) 844 715 return 0; 845 716 717 
+ if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) 718 + __clear_bit(idx, base->pending_map); 719 + 846 720 detach_timer(timer, clear_pending); 847 - if (!(timer->flags & TIMER_DEFERRABLE)) { 848 - base->active_timers--; 849 - if (timer->expires == base->next_timer) 850 - base->next_timer = base->timer_jiffies; 851 - } 852 - /* If this was the last timer, advance base->jiffies */ 853 - if (!--base->all_timers) 854 - base->timer_jiffies = jiffies; 855 721 return 1; 856 722 } 857 723 724 + static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 725 + { 726 + struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 727 + 728 + /* 729 + * If the timer is deferrable and nohz is active then we need to use 730 + * the deferrable base. 731 + */ 732 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 733 + (tflags & TIMER_DEFERRABLE)) 734 + base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 735 + return base; 736 + } 737 + 738 + static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 739 + { 740 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 741 + 742 + /* 743 + * If the timer is deferrable and nohz is active then we need to use 744 + * the deferrable base. 
745 + */ 746 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 747 + (tflags & TIMER_DEFERRABLE)) 748 + base = this_cpu_ptr(&timer_bases[BASE_DEF]); 749 + return base; 750 + } 751 + 752 + static inline struct timer_base *get_timer_base(u32 tflags) 753 + { 754 + return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); 755 + } 756 + 757 + #ifdef CONFIG_NO_HZ_COMMON 758 + static inline struct timer_base * 759 + __get_target_base(struct timer_base *base, unsigned tflags) 760 + { 761 + #ifdef CONFIG_SMP 762 + if ((tflags & TIMER_PINNED) || !base->migration_enabled) 763 + return get_timer_this_cpu_base(tflags); 764 + return get_timer_cpu_base(tflags, get_nohz_timer_target()); 765 + #else 766 + return get_timer_this_cpu_base(tflags); 767 + #endif 768 + } 769 + 770 + static inline void forward_timer_base(struct timer_base *base) 771 + { 772 + /* 773 + * We only forward the base when it's idle and we have a delta between 774 + * base clock and jiffies. 775 + */ 776 + if (!base->is_idle || (long) (jiffies - base->clk) < 2) 777 + return; 778 + 779 + /* 780 + * If the next expiry value is > jiffies, then we fast forward to 781 + * jiffies otherwise we forward to the next expiry value. 
782 + */ 783 + if (time_after(base->next_expiry, jiffies)) 784 + base->clk = jiffies; 785 + else 786 + base->clk = base->next_expiry; 787 + } 788 + #else 789 + static inline struct timer_base * 790 + __get_target_base(struct timer_base *base, unsigned tflags) 791 + { 792 + return get_timer_this_cpu_base(tflags); 793 + } 794 + 795 + static inline void forward_timer_base(struct timer_base *base) { } 796 + #endif 797 + 798 + static inline struct timer_base * 799 + get_target_base(struct timer_base *base, unsigned tflags) 800 + { 801 + struct timer_base *target = __get_target_base(base, tflags); 802 + 803 + forward_timer_base(target); 804 + return target; 805 + } 806 + 858 807 /* 859 - * We are using hashed locking: holding per_cpu(tvec_bases).lock 860 - * means that all timers which are tied to this base via timer->base are 861 - * locked, and the base itself is locked too. 808 + * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 809 + * that all timers which are tied to this base are locked, and the base itself 810 + * is locked too. 862 811 * 863 812 * So __run_timers/migrate_timers can safely modify all timers which could 864 - * be found on ->tvX lists. 813 + * be found in the base->vectors array. 865 814 * 866 - * When the timer's base is locked and removed from the list, the 867 - * TIMER_MIGRATING flag is set, FIXME 815 + * When a timer is migrating then the TIMER_MIGRATING flag is set and we need 816 + * to wait until the migration is done. 
868 817 */ 869 - static struct tvec_base *lock_timer_base(struct timer_list *timer, 870 - unsigned long *flags) 818 + static struct timer_base *lock_timer_base(struct timer_list *timer, 819 + unsigned long *flags) 871 820 __acquires(timer->base->lock) 872 821 { 873 822 for (;;) { 823 + struct timer_base *base; 874 824 u32 tf = timer->flags; 875 - struct tvec_base *base; 876 825 877 826 if (!(tf & TIMER_MIGRATING)) { 878 - base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); 827 + base = get_timer_base(tf); 879 828 spin_lock_irqsave(&base->lock, *flags); 880 829 if (timer->flags == tf) 881 830 return base; ··· 957 764 } 958 765 959 766 static inline int 960 - __mod_timer(struct timer_list *timer, unsigned long expires, 961 - bool pending_only, int pinned) 767 + __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) 962 768 { 963 - struct tvec_base *base, *new_base; 964 - unsigned long flags; 769 + struct timer_base *base, *new_base; 770 + unsigned int idx = UINT_MAX; 771 + unsigned long clk = 0, flags; 965 772 int ret = 0; 773 + 774 + /* 775 + * This is a common optimization triggered by the networking code - if 776 + * the timer is re-modified to have the same timeout or ends up in the 777 + * same array bucket then just return: 778 + */ 779 + if (timer_pending(timer)) { 780 + if (timer->expires == expires) 781 + return 1; 782 + /* 783 + * Take the current timer_jiffies of base, but without holding 784 + * the lock! 785 + */ 786 + base = get_timer_base(timer->flags); 787 + clk = base->clk; 788 + 789 + idx = calc_wheel_index(expires, clk); 790 + 791 + /* 792 + * Retrieve and compare the array index of the pending 793 + * timer. If it matches set the expiry to the new value so a 794 + * subsequent call will exit in the expires check above. 
795 + */ 796 + if (idx == timer_get_idx(timer)) { 797 + timer->expires = expires; 798 + return 1; 799 + } 800 + } 966 801 967 802 timer_stats_timer_set_start_info(timer); 968 803 BUG_ON(!timer->function); ··· 1003 782 1004 783 debug_activate(timer, expires); 1005 784 1006 - new_base = get_target_base(base, pinned); 785 + new_base = get_target_base(base, timer->flags); 1007 786 1008 787 if (base != new_base) { 1009 788 /* 1010 - * We are trying to schedule the timer on the local CPU. 789 + * We are trying to schedule the timer on the new base. 1011 790 * However we can't change timer's base while it is running, 1012 791 * otherwise del_timer_sync() can't detect that the timer's 1013 - * handler yet has not finished. This also guarantees that 1014 - * the timer is serialized wrt itself. 792 + * handler yet has not finished. This also guarantees that the 793 + * timer is serialized wrt itself. 1015 794 */ 1016 795 if (likely(base->running_timer != timer)) { 1017 796 /* See the comment in lock_timer_base() */ ··· 1026 805 } 1027 806 1028 807 timer->expires = expires; 1029 - internal_add_timer(base, timer); 808 + /* 809 + * If 'idx' was calculated above and the base time did not advance 810 + * between calculating 'idx' and taking the lock, only enqueue_timer() 811 + * and trigger_dyntick_cpu() is required. Otherwise we need to 812 + * (re)calculate the wheel index via internal_add_timer(). 
813 + */ 814 + if (idx != UINT_MAX && clk == base->clk) { 815 + enqueue_timer(base, timer, idx); 816 + trigger_dyntick_cpu(base, timer); 817 + } else { 818 + internal_add_timer(base, timer); 819 + } 1030 820 1031 821 out_unlock: 1032 822 spin_unlock_irqrestore(&base->lock, flags); ··· 1057 825 */ 1058 826 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1059 827 { 1060 - return __mod_timer(timer, expires, true, TIMER_NOT_PINNED); 828 + return __mod_timer(timer, expires, true); 1061 829 } 1062 830 EXPORT_SYMBOL(mod_timer_pending); 1063 - 1064 - /* 1065 - * Decide where to put the timer while taking the slack into account 1066 - * 1067 - * Algorithm: 1068 - * 1) calculate the maximum (absolute) time 1069 - * 2) calculate the highest bit where the expires and new max are different 1070 - * 3) use this bit to make a mask 1071 - * 4) use the bitmask to round down the maximum time, so that all last 1072 - * bits are zeros 1073 - */ 1074 - static inline 1075 - unsigned long apply_slack(struct timer_list *timer, unsigned long expires) 1076 - { 1077 - unsigned long expires_limit, mask; 1078 - int bit; 1079 - 1080 - if (timer->slack >= 0) { 1081 - expires_limit = expires + timer->slack; 1082 - } else { 1083 - long delta = expires - jiffies; 1084 - 1085 - if (delta < 256) 1086 - return expires; 1087 - 1088 - expires_limit = expires + delta / 256; 1089 - } 1090 - mask = expires ^ expires_limit; 1091 - if (mask == 0) 1092 - return expires; 1093 - 1094 - bit = __fls(mask); 1095 - 1096 - mask = (1UL << bit) - 1; 1097 - 1098 - expires_limit = expires_limit & ~(mask); 1099 - 1100 - return expires_limit; 1101 - } 1102 831 1103 832 /** 1104 833 * mod_timer - modify a timer's timeout ··· 1083 890 */ 1084 891 int mod_timer(struct timer_list *timer, unsigned long expires) 1085 892 { 1086 - expires = apply_slack(timer, expires); 1087 - 1088 - /* 1089 - * This is a common optimization triggered by the 1090 - * networking code - if the timer is re-modified 1091 - * to 
be the same thing then just return: 1092 - */ 1093 - if (timer_pending(timer) && timer->expires == expires) 1094 - return 1; 1095 - 1096 - return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); 893 + return __mod_timer(timer, expires, false); 1097 894 } 1098 895 EXPORT_SYMBOL(mod_timer); 1099 - 1100 - /** 1101 - * mod_timer_pinned - modify a timer's timeout 1102 - * @timer: the timer to be modified 1103 - * @expires: new timeout in jiffies 1104 - * 1105 - * mod_timer_pinned() is a way to update the expire field of an 1106 - * active timer (if the timer is inactive it will be activated) 1107 - * and to ensure that the timer is scheduled on the current CPU. 1108 - * 1109 - * Note that this does not prevent the timer from being migrated 1110 - * when the current CPU goes offline. If this is a problem for 1111 - * you, use CPU-hotplug notifiers to handle it correctly, for 1112 - * example, cancelling the timer when the corresponding CPU goes 1113 - * offline. 1114 - * 1115 - * mod_timer_pinned(timer, expires) is equivalent to: 1116 - * 1117 - * del_timer(timer); timer->expires = expires; add_timer(timer); 1118 - */ 1119 - int mod_timer_pinned(struct timer_list *timer, unsigned long expires) 1120 - { 1121 - if (timer->expires == expires && timer_pending(timer)) 1122 - return 1; 1123 - 1124 - return __mod_timer(timer, expires, false, TIMER_PINNED); 1125 - } 1126 - EXPORT_SYMBOL(mod_timer_pinned); 1127 896 1128 897 /** 1129 898 * add_timer - start a timer ··· 1117 962 */ 1118 963 void add_timer_on(struct timer_list *timer, int cpu) 1119 964 { 1120 - struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu); 1121 - struct tvec_base *base; 965 + struct timer_base *new_base, *base; 1122 966 unsigned long flags; 1123 967 1124 968 timer_stats_timer_set_start_info(timer); 1125 969 BUG_ON(timer_pending(timer) || !timer->function); 970 + 971 + new_base = get_timer_cpu_base(timer->flags, cpu); 1126 972 1127 973 /* 1128 974 * If @timer was on a different CPU, it should be 
migrated with the ··· 1160 1004 */ 1161 1005 int del_timer(struct timer_list *timer) 1162 1006 { 1163 - struct tvec_base *base; 1007 + struct timer_base *base; 1164 1008 unsigned long flags; 1165 1009 int ret = 0; 1166 1010 ··· 1186 1030 */ 1187 1031 int try_to_del_timer_sync(struct timer_list *timer) 1188 1032 { 1189 - struct tvec_base *base; 1033 + struct timer_base *base; 1190 1034 unsigned long flags; 1191 1035 int ret = -1; 1192 1036 ··· 1270 1114 EXPORT_SYMBOL(del_timer_sync); 1271 1115 #endif 1272 1116 1273 - static int cascade(struct tvec_base *base, struct tvec *tv, int index) 1274 - { 1275 - /* cascade all the timers from tv up one level */ 1276 - struct timer_list *timer; 1277 - struct hlist_node *tmp; 1278 - struct hlist_head tv_list; 1279 - 1280 - hlist_move_list(tv->vec + index, &tv_list); 1281 - 1282 - /* 1283 - * We are removing _all_ timers from the list, so we 1284 - * don't have to detach them individually. 1285 - */ 1286 - hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) { 1287 - /* No accounting, while moving them */ 1288 - __internal_add_timer(base, timer); 1289 - } 1290 - 1291 - return index; 1292 - } 1293 - 1294 1117 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), 1295 1118 unsigned long data) 1296 1119 { ··· 1313 1178 } 1314 1179 } 1315 1180 1316 - #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) 1317 - 1318 - /** 1319 - * __run_timers - run all expired timers (if any) on this CPU. 1320 - * @base: the timer vector to be processed. 1321 - * 1322 - * This function cascades all vectors and executes all expired timer 1323 - * vectors. 
1324 - */ 1325 - static inline void __run_timers(struct tvec_base *base) 1181 + static void expire_timers(struct timer_base *base, struct hlist_head *head) 1326 1182 { 1327 - struct timer_list *timer; 1183 + while (!hlist_empty(head)) { 1184 + struct timer_list *timer; 1185 + void (*fn)(unsigned long); 1186 + unsigned long data; 1328 1187 1329 - spin_lock_irq(&base->lock); 1188 + timer = hlist_entry(head->first, struct timer_list, entry); 1189 + timer_stats_account_timer(timer); 1330 1190 1331 - while (time_after_eq(jiffies, base->timer_jiffies)) { 1332 - struct hlist_head work_list; 1333 - struct hlist_head *head = &work_list; 1334 - int index; 1191 + base->running_timer = timer; 1192 + detach_timer(timer, true); 1335 1193 1336 - if (!base->all_timers) { 1337 - base->timer_jiffies = jiffies; 1338 - break; 1339 - } 1194 + fn = timer->function; 1195 + data = timer->data; 1340 1196 1341 - index = base->timer_jiffies & TVR_MASK; 1342 - 1343 - /* 1344 - * Cascade timers: 1345 - */ 1346 - if (!index && 1347 - (!cascade(base, &base->tv2, INDEX(0))) && 1348 - (!cascade(base, &base->tv3, INDEX(1))) && 1349 - !cascade(base, &base->tv4, INDEX(2))) 1350 - cascade(base, &base->tv5, INDEX(3)); 1351 - ++base->timer_jiffies; 1352 - hlist_move_list(base->tv1.vec + index, head); 1353 - while (!hlist_empty(head)) { 1354 - void (*fn)(unsigned long); 1355 - unsigned long data; 1356 - bool irqsafe; 1357 - 1358 - timer = hlist_entry(head->first, struct timer_list, entry); 1359 - fn = timer->function; 1360 - data = timer->data; 1361 - irqsafe = timer->flags & TIMER_IRQSAFE; 1362 - 1363 - timer_stats_account_timer(timer); 1364 - 1365 - base->running_timer = timer; 1366 - detach_expired_timer(timer, base); 1367 - 1368 - if (irqsafe) { 1369 - spin_unlock(&base->lock); 1370 - call_timer_fn(timer, fn, data); 1371 - spin_lock(&base->lock); 1372 - } else { 1373 - spin_unlock_irq(&base->lock); 1374 - call_timer_fn(timer, fn, data); 1375 - spin_lock_irq(&base->lock); 1376 - } 1197 + if 
(timer->flags & TIMER_IRQSAFE) { 1198 + spin_unlock(&base->lock); 1199 + call_timer_fn(timer, fn, data); 1200 + spin_lock(&base->lock); 1201 + } else { 1202 + spin_unlock_irq(&base->lock); 1203 + call_timer_fn(timer, fn, data); 1204 + spin_lock_irq(&base->lock); 1377 1205 } 1378 1206 } 1379 - base->running_timer = NULL; 1380 - spin_unlock_irq(&base->lock); 1207 + } 1208 + 1209 + static int __collect_expired_timers(struct timer_base *base, 1210 + struct hlist_head *heads) 1211 + { 1212 + unsigned long clk = base->clk; 1213 + struct hlist_head *vec; 1214 + int i, levels = 0; 1215 + unsigned int idx; 1216 + 1217 + for (i = 0; i < LVL_DEPTH; i++) { 1218 + idx = (clk & LVL_MASK) + i * LVL_SIZE; 1219 + 1220 + if (__test_and_clear_bit(idx, base->pending_map)) { 1221 + vec = base->vectors + idx; 1222 + hlist_move_list(vec, heads++); 1223 + levels++; 1224 + } 1225 + /* Is it time to look at the next level? */ 1226 + if (clk & LVL_CLK_MASK) 1227 + break; 1228 + /* Shift clock for the next level granularity */ 1229 + clk >>= LVL_CLK_SHIFT; 1230 + } 1231 + return levels; 1381 1232 } 1382 1233 1383 1234 #ifdef CONFIG_NO_HZ_COMMON 1384 1235 /* 1385 - * Find out when the next timer event is due to happen. This 1386 - * is used on S/390 to stop all activity when a CPU is idle. 1387 - * This function needs to be called with interrupts disabled. 1236 + * Find the next pending bucket of a level. Search from level start (@offset) 1237 + * + @clk upwards and if nothing there, search from start of the level 1238 + * (@offset) up to @offset + clk. 
1388 1239 */ 1389 - static unsigned long __next_timer_interrupt(struct tvec_base *base) 1240 + static int next_pending_bucket(struct timer_base *base, unsigned offset, 1241 + unsigned clk) 1390 1242 { 1391 - unsigned long timer_jiffies = base->timer_jiffies; 1392 - unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; 1393 - int index, slot, array, found = 0; 1394 - struct timer_list *nte; 1395 - struct tvec *varray[4]; 1243 + unsigned pos, start = offset + clk; 1244 + unsigned end = offset + LVL_SIZE; 1396 1245 1397 - /* Look for timer events in tv1. */ 1398 - index = slot = timer_jiffies & TVR_MASK; 1399 - do { 1400 - hlist_for_each_entry(nte, base->tv1.vec + slot, entry) { 1401 - if (nte->flags & TIMER_DEFERRABLE) 1402 - continue; 1246 + pos = find_next_bit(base->pending_map, end, start); 1247 + if (pos < end) 1248 + return pos - start; 1403 1249 1404 - found = 1; 1405 - expires = nte->expires; 1406 - /* Look at the cascade bucket(s)? */ 1407 - if (!index || slot < index) 1408 - goto cascade; 1409 - return expires; 1250 + pos = find_next_bit(base->pending_map, start, offset); 1251 + return pos < start ? pos + LVL_SIZE - start : -1; 1252 + } 1253 + 1254 + /* 1255 + * Search the first expiring timer in the various clock levels. Caller must 1256 + * hold base->lock. 
1257 + */ 1258 + static unsigned long __next_timer_interrupt(struct timer_base *base) 1259 + { 1260 + unsigned long clk, next, adj; 1261 + unsigned lvl, offset = 0; 1262 + 1263 + next = base->clk + NEXT_TIMER_MAX_DELTA; 1264 + clk = base->clk; 1265 + for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1266 + int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1267 + 1268 + if (pos >= 0) { 1269 + unsigned long tmp = clk + (unsigned long) pos; 1270 + 1271 + tmp <<= LVL_SHIFT(lvl); 1272 + if (time_before(tmp, next)) 1273 + next = tmp; 1410 1274 } 1411 - slot = (slot + 1) & TVR_MASK; 1412 - } while (slot != index); 1413 - 1414 - cascade: 1415 - /* Calculate the next cascade event */ 1416 - if (index) 1417 - timer_jiffies += TVR_SIZE - index; 1418 - timer_jiffies >>= TVR_BITS; 1419 - 1420 - /* Check tv2-tv5. */ 1421 - varray[0] = &base->tv2; 1422 - varray[1] = &base->tv3; 1423 - varray[2] = &base->tv4; 1424 - varray[3] = &base->tv5; 1425 - 1426 - for (array = 0; array < 4; array++) { 1427 - struct tvec *varp = varray[array]; 1428 - 1429 - index = slot = timer_jiffies & TVN_MASK; 1430 - do { 1431 - hlist_for_each_entry(nte, varp->vec + slot, entry) { 1432 - if (nte->flags & TIMER_DEFERRABLE) 1433 - continue; 1434 - 1435 - found = 1; 1436 - if (time_before(nte->expires, expires)) 1437 - expires = nte->expires; 1438 - } 1439 - /* 1440 - * Do we still search for the first timer or are 1441 - * we looking up the cascade buckets ? 1442 - */ 1443 - if (found) { 1444 - /* Look at the cascade bucket(s)? */ 1445 - if (!index || slot < index) 1446 - break; 1447 - return expires; 1448 - } 1449 - slot = (slot + 1) & TVN_MASK; 1450 - } while (slot != index); 1451 - 1452 - if (index) 1453 - timer_jiffies += TVN_SIZE - index; 1454 - timer_jiffies >>= TVN_BITS; 1275 + /* 1276 + * Clock for the next level. If the current level clock lower 1277 + * bits are zero, we look at the next level as is. 
If not we 1278 + * need to advance it by one because that's going to be the 1279 + * next expiring bucket in that level. base->clk is the next 1280 + * expiring jiffie. So in case of: 1281 + * 1282 + * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1283 + * 0 0 0 0 0 0 1284 + * 1285 + * we have to look at all levels @index 0. With 1286 + * 1287 + * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1288 + * 0 0 0 0 0 2 1289 + * 1290 + * LVL0 has the next expiring bucket @index 2. The upper 1291 + * levels have the next expiring bucket @index 1. 1292 + * 1293 + * In case that the propagation wraps the next level the same 1294 + * rules apply: 1295 + * 1296 + * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1297 + * 0 0 0 0 F 2 1298 + * 1299 + * So after looking at LVL0 we get: 1300 + * 1301 + * LVL5 LVL4 LVL3 LVL2 LVL1 1302 + * 0 0 0 1 0 1303 + * 1304 + * So no propagation from LVL1 to LVL2 because that happened 1305 + * with the add already, but then we need to propagate further 1306 + * from LVL2 to LVL3. 1307 + * 1308 + * So the simple check whether the lower bits of the current 1309 + * level are 0 or not is sufficient for all cases. 1310 + */ 1311 + adj = clk & LVL_CLK_MASK ? 
1 : 0; 1312 + clk >>= LVL_CLK_SHIFT; 1313 + clk += adj; 1455 1314 } 1456 - return expires; 1315 + return next; 1457 1316 } 1458 1317 1459 1318 /* ··· 1493 1364 */ 1494 1365 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1495 1366 { 1496 - struct tvec_base *base = this_cpu_ptr(&tvec_bases); 1367 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1497 1368 u64 expires = KTIME_MAX; 1498 1369 unsigned long nextevt; 1499 1370 ··· 1505 1376 return expires; 1506 1377 1507 1378 spin_lock(&base->lock); 1508 - if (base->active_timers) { 1509 - if (time_before_eq(base->next_timer, base->timer_jiffies)) 1510 - base->next_timer = __next_timer_interrupt(base); 1511 - nextevt = base->next_timer; 1512 - if (time_before_eq(nextevt, basej)) 1513 - expires = basem; 1514 - else 1515 - expires = basem + (nextevt - basej) * TICK_NSEC; 1379 + nextevt = __next_timer_interrupt(base); 1380 + base->next_expiry = nextevt; 1381 + /* 1382 + * We have a fresh next event. Check whether we can forward the base: 1383 + */ 1384 + if (time_after(nextevt, jiffies)) 1385 + base->clk = jiffies; 1386 + else if (time_after(nextevt, base->clk)) 1387 + base->clk = nextevt; 1388 + 1389 + if (time_before_eq(nextevt, basej)) { 1390 + expires = basem; 1391 + base->is_idle = false; 1392 + } else { 1393 + expires = basem + (nextevt - basej) * TICK_NSEC; 1394 + /* 1395 + * If we expect to sleep more than a tick, mark the base idle: 1396 + */ 1397 + if ((expires - basem) > TICK_NSEC) 1398 + base->is_idle = true; 1516 1399 } 1517 1400 spin_unlock(&base->lock); 1518 1401 1519 1402 return cmp_next_hrtimer_event(basem, expires); 1403 + } 1404 + 1405 + /** 1406 + * timer_clear_idle - Clear the idle state of the timer base 1407 + * 1408 + * Called with interrupts disabled 1409 + */ 1410 + void timer_clear_idle(void) 1411 + { 1412 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1413 + 1414 + /* 1415 + * We do this unlocked. 
The worst outcome is a remote enqueue sending 1416 + * a pointless IPI, but taking the lock would just make the window for 1417 + * sending the IPI a few instructions smaller for the cost of taking 1418 + * the lock in the exit from idle path. 1419 + */ 1420 + base->is_idle = false; 1421 + } 1422 + 1423 + static int collect_expired_timers(struct timer_base *base, 1424 + struct hlist_head *heads) 1425 + { 1426 + /* 1427 + * NOHZ optimization. After a long idle sleep we need to forward the 1428 + * base to current jiffies. Avoid a loop by searching the bitfield for 1429 + * the next expiring timer. 1430 + */ 1431 + if ((long)(jiffies - base->clk) > 2) { 1432 + unsigned long next = __next_timer_interrupt(base); 1433 + 1434 + /* 1435 + * If the next timer is ahead of time forward to current 1436 + * jiffies, otherwise forward to the next expiry time: 1437 + */ 1438 + if (time_after(next, jiffies)) { 1439 + /* The call site will increment clock! */ 1440 + base->clk = jiffies - 1; 1441 + return 0; 1442 + } 1443 + base->clk = next; 1444 + } 1445 + return __collect_expired_timers(base, heads); 1446 + } 1447 + #else 1448 + static inline int collect_expired_timers(struct timer_base *base, 1449 + struct hlist_head *heads) 1450 + { 1451 + return __collect_expired_timers(base, heads); 1520 1452 } 1521 1453 #endif 1522 1454 ··· 1601 1411 run_posix_cpu_timers(p); 1602 1412 } 1603 1413 1414 + /** 1415 + * __run_timers - run all expired timers (if any) on this CPU. 1416 + * @base: the timer vector to be processed. 
1417 + */ 1418 + static inline void __run_timers(struct timer_base *base) 1419 + { 1420 + struct hlist_head heads[LVL_DEPTH]; 1421 + int levels; 1422 + 1423 + if (!time_after_eq(jiffies, base->clk)) 1424 + return; 1425 + 1426 + spin_lock_irq(&base->lock); 1427 + 1428 + while (time_after_eq(jiffies, base->clk)) { 1429 + 1430 + levels = collect_expired_timers(base, heads); 1431 + base->clk++; 1432 + 1433 + while (levels--) 1434 + expire_timers(base, heads + levels); 1435 + } 1436 + base->running_timer = NULL; 1437 + spin_unlock_irq(&base->lock); 1438 + } 1439 + 1604 1440 /* 1605 1441 * This function runs timers and the timer-tq in bottom half context. 1606 1442 */ 1607 1443 static void run_timer_softirq(struct softirq_action *h) 1608 1444 { 1609 - struct tvec_base *base = this_cpu_ptr(&tvec_bases); 1445 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1610 1446 1611 - if (time_after_eq(jiffies, base->timer_jiffies)) 1612 - __run_timers(base); 1447 + __run_timers(base); 1448 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) 1449 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 1613 1450 } 1614 1451 1615 1452 /* ··· 1644 1427 */ 1645 1428 void run_local_timers(void) 1646 1429 { 1430 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1431 + 1647 1432 hrtimer_run_queues(); 1433 + /* Raise the softirq only if required. */ 1434 + if (time_before(jiffies, base->clk)) { 1435 + if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) 1436 + return; 1437 + /* CPU is awake, so check the deferrable base. 
*/ 1438 + base++; 1439 + if (time_before(jiffies, base->clk)) 1440 + return; 1441 + } 1648 1442 raise_softirq(TIMER_SOFTIRQ); 1649 1443 } 1650 1444 ··· 1740 1512 expire = timeout + jiffies; 1741 1513 1742 1514 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1743 - __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); 1515 + __mod_timer(&timer, expire, false); 1744 1516 schedule(); 1745 1517 del_singleshot_timer_sync(&timer); 1746 1518 ··· 1791 1563 EXPORT_SYMBOL(schedule_timeout_idle); 1792 1564 1793 1565 #ifdef CONFIG_HOTPLUG_CPU 1794 - static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head) 1566 + static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) 1795 1567 { 1796 1568 struct timer_list *timer; 1797 1569 int cpu = new_base->cpu; 1798 1570 1799 1571 while (!hlist_empty(head)) { 1800 1572 timer = hlist_entry(head->first, struct timer_list, entry); 1801 - /* We ignore the accounting on the dying cpu */ 1802 1573 detach_timer(timer, false); 1803 1574 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; 1804 1575 internal_add_timer(new_base, timer); ··· 1806 1579 1807 1580 static void migrate_timers(int cpu) 1808 1581 { 1809 - struct tvec_base *old_base; 1810 - struct tvec_base *new_base; 1811 - int i; 1582 + struct timer_base *old_base; 1583 + struct timer_base *new_base; 1584 + int b, i; 1812 1585 1813 1586 BUG_ON(cpu_online(cpu)); 1814 - old_base = per_cpu_ptr(&tvec_bases, cpu); 1815 - new_base = get_cpu_ptr(&tvec_bases); 1816 - /* 1817 - * The caller is globally serialized and nobody else 1818 - * takes two locks at once, deadlock is not possible. 
1819 - */ 1820 - spin_lock_irq(&new_base->lock); 1821 - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1822 1587 1823 - BUG_ON(old_base->running_timer); 1588 + for (b = 0; b < NR_BASES; b++) { 1589 + old_base = per_cpu_ptr(&timer_bases[b], cpu); 1590 + new_base = get_cpu_ptr(&timer_bases[b]); 1591 + /* 1592 + * The caller is globally serialized and nobody else 1593 + * takes two locks at once, deadlock is not possible. 1594 + */ 1595 + spin_lock_irq(&new_base->lock); 1596 + spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1824 1597 1825 - for (i = 0; i < TVR_SIZE; i++) 1826 - migrate_timer_list(new_base, old_base->tv1.vec + i); 1827 - for (i = 0; i < TVN_SIZE; i++) { 1828 - migrate_timer_list(new_base, old_base->tv2.vec + i); 1829 - migrate_timer_list(new_base, old_base->tv3.vec + i); 1830 - migrate_timer_list(new_base, old_base->tv4.vec + i); 1831 - migrate_timer_list(new_base, old_base->tv5.vec + i); 1598 + BUG_ON(old_base->running_timer); 1599 + 1600 + for (i = 0; i < WHEEL_SIZE; i++) 1601 + migrate_timer_list(new_base, old_base->vectors + i); 1602 + 1603 + spin_unlock(&old_base->lock); 1604 + spin_unlock_irq(&new_base->lock); 1605 + put_cpu_ptr(&timer_bases); 1832 1606 } 1833 - 1834 - old_base->active_timers = 0; 1835 - old_base->all_timers = 0; 1836 - 1837 - spin_unlock(&old_base->lock); 1838 - spin_unlock_irq(&new_base->lock); 1839 - put_cpu_ptr(&tvec_bases); 1840 1607 } 1841 1608 1842 1609 static int timer_cpu_notify(struct notifier_block *self, ··· 1858 1637 1859 1638 static void __init init_timer_cpu(int cpu) 1860 1639 { 1861 - struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu); 1640 + struct timer_base *base; 1641 + int i; 1862 1642 1863 - base->cpu = cpu; 1864 - spin_lock_init(&base->lock); 1865 - 1866 - base->timer_jiffies = jiffies; 1867 - base->next_timer = base->timer_jiffies; 1643 + for (i = 0; i < NR_BASES; i++) { 1644 + base = per_cpu_ptr(&timer_bases[i], cpu); 1645 + base->cpu = cpu; 1646 + spin_lock_init(&base->lock); 1647 
+ base->clk = jiffies; 1648 + } 1868 1649 } 1869 1650 1870 1651 static void __init init_timer_cpus(void) ··· 1925 1702 } 1926 1703 1927 1704 /** 1928 - * usleep_range - Drop in replacement for udelay where wakeup is flexible 1705 + * usleep_range - Sleep for an approximate time 1929 1706 * @min: Minimum time in usecs to sleep 1930 1707 * @max: Maximum time in usecs to sleep 1708 + * 1709 + * In non-atomic context where the exact wakeup time is flexible, use 1710 + * usleep_range() instead of udelay(). The sleep improves responsiveness 1711 + * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces 1712 + * power usage by allowing hrtimers to take advantage of an already- 1713 + * scheduled interrupt instead of scheduling a new one just for this sleep. 1931 1714 */ 1932 1715 void __sched usleep_range(unsigned long min, unsigned long max) 1933 1716 {
+3 -3
kernel/time/timer_stats.c
··· 279 279 280 280 static int tstats_show(struct seq_file *m, void *v) 281 281 { 282 - struct timespec period; 282 + struct timespec64 period; 283 283 struct entry *entry; 284 284 unsigned long ms; 285 285 long events = 0; ··· 295 295 296 296 time = ktime_sub(time_stop, time_start); 297 297 298 - period = ktime_to_timespec(time); 298 + period = ktime_to_timespec64(time); 299 299 ms = period.tv_nsec / 1000000; 300 300 301 301 seq_puts(m, "Timer Stats Version: v0.3\n"); 302 - seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); 302 + seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms); 303 303 if (atomic_read(&overflow_count)) 304 304 seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count)); 305 305 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
-1
lib/random32.c
··· 233 233 234 234 static void __init __prandom_start_seed_timer(void) 235 235 { 236 - set_timer_slack(&seed_timer, HZ); 237 236 seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); 238 237 add_timer(&seed_timer); 239 238 }
+4 -3
net/ipv4/inet_connection_sock.c
··· 603 603 if (req->num_timeout++ == 0) 604 604 atomic_dec(&queue->young); 605 605 timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); 606 - mod_timer_pinned(&req->rsk_timer, jiffies + timeo); 606 + mod_timer(&req->rsk_timer, jiffies + timeo); 607 607 return; 608 608 } 609 609 drop: ··· 617 617 req->num_timeout = 0; 618 618 req->sk = NULL; 619 619 620 - setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); 621 - mod_timer_pinned(&req->rsk_timer, jiffies + timeout); 620 + setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler, 621 + (unsigned long)req); 622 + mod_timer(&req->rsk_timer, jiffies + timeout); 622 623 623 624 inet_ehash_insert(req_to_sk(req), NULL); 624 625 /* before letting lookups find us, make sure all req fields
+3 -2
net/ipv4/inet_timewait_sock.c
··· 188 188 tw->tw_prot = sk->sk_prot_creator; 189 189 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); 190 190 twsk_net_set(tw, sock_net(sk)); 191 - setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw); 191 + setup_pinned_timer(&tw->tw_timer, tw_timer_handler, 192 + (unsigned long)tw); 192 193 /* 193 194 * Because we use RCU lookups, we should not set tw_refcnt 194 195 * to a non null value before everything is setup for this ··· 249 248 250 249 tw->tw_kill = timeo <= 4*HZ; 251 250 if (!rearm) { 252 - BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo)); 251 + BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo)); 253 252 atomic_inc(&tw->tw_dr->tw_count); 254 253 } else { 255 254 mod_timer_pending(&tw->tw_timer, jiffies + timeo);