Merge tag 'timers-v5.11-2' of https://git.linaro.org/people/daniel.lezcano/linux into timers/core

Pull clocksource/events updates from Daniel Lezcano:

- Fix error handling if no clock is available on dw_apb_timer_of (Dinh Nguyen)

- Fix overhead for erratum handling when the timer has no erratum and
fix faulty programming for the event stream on the arm arch timer
(Keqian Zhu)

- Fix potential deadlock when calling runtime PM on sh_cmt (Niklas
Söderlund)

+71 -31
+18 -9
drivers/clocksource/arm_arch_timer.c
··· 396 396 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 397 397 398 398 if (access == ARCH_TIMER_PHYS_ACCESS) { 399 - cval = evt + arch_counter_get_cntpct(); 399 + cval = evt + arch_counter_get_cntpct_stable(); 400 400 write_sysreg(cval, cntp_cval_el0); 401 401 } else { 402 - cval = evt + arch_counter_get_cntvct(); 402 + cval = evt + arch_counter_get_cntvct_stable(); 403 403 write_sysreg(cval, cntv_cval_el0); 404 404 } 405 405 ··· 822 822 823 823 static void arch_timer_configure_evtstream(void) 824 824 { 825 - int evt_stream_div, pos; 825 + int evt_stream_div, lsb; 826 826 827 - /* Find the closest power of two to the divisor */ 828 - evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; 829 - pos = fls(evt_stream_div); 830 - if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) 831 - pos--; 827 + /* 828 + * As the event stream can at most be generated at half the frequency 829 + * of the counter, use half the frequency when computing the divider. 830 + */ 831 + evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; 832 + 833 + /* 834 + * Find the closest power of two to the divisor. If the adjacent bit 835 + * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). 836 + */ 837 + lsb = fls(evt_stream_div) - 1; 838 + if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) 839 + lsb++; 840 + 832 841 /* enable event stream */ 833 - arch_timer_evtstrm_enable(min(pos, 15)); 842 + arch_timer_evtstrm_enable(max(0, min(lsb, 15))); 834 843 } 835 844 836 845 static void arch_counter_set_user_access(void)
+39 -18
drivers/clocksource/dw_apb_timer_of.c
··· 14 14 #include <linux/reset.h> 15 15 #include <linux/sched_clock.h> 16 16 17 - static void __init timer_get_base_and_rate(struct device_node *np, 17 + static int __init timer_get_base_and_rate(struct device_node *np, 18 18 void __iomem **base, u32 *rate) 19 19 { 20 20 struct clk *timer_clk; 21 21 struct clk *pclk; 22 22 struct reset_control *rstc; 23 + int ret; 23 24 24 25 *base = of_iomap(np, 0); 25 26 ··· 47 46 pr_warn("pclk for %pOFn is present, but could not be activated\n", 48 47 np); 49 48 49 + if (!of_property_read_u32(np, "clock-freq", rate) && 50 + !of_property_read_u32(np, "clock-frequency", rate)) 51 + return 0; 52 + 50 53 timer_clk = of_clk_get_by_name(np, "timer"); 51 54 if (IS_ERR(timer_clk)) 52 - goto try_clock_freq; 55 + return PTR_ERR(timer_clk); 53 56 54 - if (!clk_prepare_enable(timer_clk)) { 55 - *rate = clk_get_rate(timer_clk); 56 - return; 57 - } 57 + ret = clk_prepare_enable(timer_clk); 58 + if (ret) 59 + return ret; 58 60 59 - try_clock_freq: 60 - if (of_property_read_u32(np, "clock-freq", rate) && 61 - of_property_read_u32(np, "clock-frequency", rate)) 62 - panic("No clock nor clock-frequency property for %pOFn", np); 61 + *rate = clk_get_rate(timer_clk); 62 + if (!(*rate)) 63 + return -EINVAL; 64 + 65 + return 0; 63 66 } 64 67 65 - static void __init add_clockevent(struct device_node *event_timer) 68 + static int __init add_clockevent(struct device_node *event_timer) 66 69 { 67 70 void __iomem *iobase; 68 71 struct dw_apb_clock_event_device *ced; 69 72 u32 irq, rate; 73 + int ret = 0; 70 74 71 75 irq = irq_of_parse_and_map(event_timer, 0); 72 76 if (irq == 0) 73 77 panic("No IRQ for clock event timer"); 74 78 75 - timer_get_base_and_rate(event_timer, &iobase, &rate); 79 + ret = timer_get_base_and_rate(event_timer, &iobase, &rate); 80 + if (ret) 81 + return ret; 76 82 77 83 ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq, 78 84 rate); 79 85 if (!ced) 80 - panic("Unable to initialise clockevent device"); 86 + return -EINVAL; 81 87 82 88 dw_apb_clockevent_register(ced); 89 + 90 + return 0; 83 91 } 84 92 85 93 static void __iomem *sched_io_base; 86 94 static u32 sched_rate; 87 95 88 - static void __init add_clocksource(struct device_node *source_timer) 96 + static int __init add_clocksource(struct device_node *source_timer) 89 97 { 90 98 void __iomem *iobase; 91 99 struct dw_apb_clocksource *cs; 92 100 u32 rate; 101 + int ret; 93 102 94 - timer_get_base_and_rate(source_timer, &iobase, &rate); 103 + ret = timer_get_base_and_rate(source_timer, &iobase, &rate); 104 + if (ret) 105 + return ret; 95 106 96 107 cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate); 97 108 if (!cs) 98 - panic("Unable to initialise clocksource device"); 109 + return -EINVAL; 99 110 100 111 dw_apb_clocksource_start(cs); 101 112 dw_apb_clocksource_register(cs); ··· 119 106 */ 120 107 sched_io_base = iobase + 0x04; 121 108 sched_rate = rate; 109 + 110 + return 0; 122 111 } 123 112 124 113 static u64 notrace read_sched_clock(void) ··· 161 146 static int num_called; 162 147 static int __init dw_apb_timer_init(struct device_node *timer) 163 148 { 149 + int ret = 0; 150 + 164 151 switch (num_called) { 165 152 case 1: 166 153 pr_debug("%s: found clocksource timer\n", __func__); 167 - add_clocksource(timer); 154 + ret = add_clocksource(timer); 155 + if (ret) 156 + return ret; 168 157 init_sched_clock(); 169 158 #ifdef CONFIG_ARM 170 159 dw_apb_delay_timer.freq = sched_rate; ··· 177 158 break; 178 159 default: 179 160 pr_debug("%s: found clockevent timer\n", __func__); 180 - add_clockevent(timer); 161 + ret = add_clockevent(timer); 162 + if (ret) 163 + return ret; 181 164 break; 182 165 } 183 166
+14 -4
drivers/clocksource/sh_cmt.c
··· 319 319 { 320 320 int k, ret; 321 321 322 - pm_runtime_get_sync(&ch->cmt->pdev->dev); 323 322 dev_pm_syscore_device(&ch->cmt->pdev->dev, true); 324 323 325 324 /* enable clock */ ··· 393 394 clk_disable(ch->cmt->clk); 394 395 395 396 dev_pm_syscore_device(&ch->cmt->pdev->dev, false); 396 - pm_runtime_put(&ch->cmt->pdev->dev); 397 397 } 398 398 399 399 /* private flags */ ··· 560 562 int ret = 0; 561 563 unsigned long flags; 562 564 565 + if (flag & FLAG_CLOCKSOURCE) 566 + pm_runtime_get_sync(&ch->cmt->pdev->dev); 567 + 563 568 raw_spin_lock_irqsave(&ch->lock, flags); 564 569 565 - if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 570 + if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) { 571 + if (flag & FLAG_CLOCKEVENT) 572 + pm_runtime_get_sync(&ch->cmt->pdev->dev); 566 573 ret = sh_cmt_enable(ch); 574 + } 567 575 568 576 if (ret) 569 577 goto out; ··· 594 590 f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); 595 591 ch->flags &= ~flag; 596 592 597 - if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 593 + if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) { 598 594 sh_cmt_disable(ch); 595 + if (flag & FLAG_CLOCKEVENT) 596 + pm_runtime_put(&ch->cmt->pdev->dev); 597 + } 599 598 600 599 /* adjust the timeout to maximum if only clocksource left */ 601 600 if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE)) 602 601 __sh_cmt_set_next(ch, ch->max_match_value); 603 602 604 603 raw_spin_unlock_irqrestore(&ch->lock, flags); 604 + 605 + if (flag & FLAG_CLOCKSOURCE) 606 + pm_runtime_put(&ch->cmt->pdev->dev); 605 607 } 606 608 607 609 static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)