Merge tag 'timers-v5.11-2' of https://git.linaro.org/people/daniel.lezcano/linux into timers/core

Pull clocksource/events updates from Daniel Lezcano:

- Fix error handling if no clock is available on dw_apb_timer_of (Dinh Nguyen)

- Fix overhead for erratum handling when the timer has no erratum and
fix faulty programming of the event stream on the arm arch timer
(Keqian Zhu)

- Fix potential deadlock when calling runtime PM on sh_cmt (Niklas
Söderlund)

3 files changed, 71 insertions(+), 31 deletions(-)

drivers/clocksource/arm_arch_timer.c | +18 -9
···
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
 
 	if (access == ARCH_TIMER_PHYS_ACCESS) {
-		cval = evt + arch_counter_get_cntpct();
+		cval = evt + arch_counter_get_cntpct_stable();
 		write_sysreg(cval, cntp_cval_el0);
 	} else {
-		cval = evt + arch_counter_get_cntvct();
+		cval = evt + arch_counter_get_cntvct_stable();
 		write_sysreg(cval, cntv_cval_el0);
 	}
···
 
 static void arch_timer_configure_evtstream(void)
 {
-	int evt_stream_div, pos;
+	int evt_stream_div, lsb;
 
-	/* Find the closest power of two to the divisor */
-	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
-	pos = fls(evt_stream_div);
-	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
-		pos--;
+	/*
+	 * As the event stream can at most be generated at half the frequency
+	 * of the counter, use half the frequency when computing the divider.
+	 */
+	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
+
+	/*
+	 * Find the closest power of two to the divisor. If the adjacent bit
+	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
+	 */
+	lsb = fls(evt_stream_div) - 1;
+	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
+		lsb++;
+
 	/* enable event stream */
-	arch_timer_evtstrm_enable(min(pos, 15));
+	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
 }
 
 static void arch_counter_set_user_access(void)
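For reference, the rounding logic in arch_timer_configure_evtstream() can be
checked in isolation. Below is a standalone userspace sketch, not driver code:
fls() and BIT() are stand-ins for the kernel helpers, and the 100 MHz counter
rate and 10 kHz event-stream target are illustrative values only.

/*
 * Standalone sketch of the evtstream divider rounding; fls() and BIT()
 * mimic the kernel helpers, the rates are made-up example values.
 */
#include <stdio.h>

#define BIT(n) (1U << (n))

/* 1-based index of the highest set bit, 0 if x == 0 (like kernel fls()) */
static int fls(unsigned int x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

int main(void)
{
	unsigned int arch_timer_rate = 100000000;	/* 100 MHz counter (example) */
	unsigned int evt_stream_freq = 10000;		/* 10 kHz target (example) */
	int evt_stream_div, lsb;

	/* The event stream can run at most at half the counter frequency. */
	evt_stream_div = arch_timer_rate / evt_stream_freq / 2;	/* 5000 */

	/* Closest power of two: round up when the next-lower bit is set. */
	lsb = fls(evt_stream_div) - 1;		/* 12, since 2^12 <= 5000 < 2^13 */
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;				/* bit 11 of 5000 is clear: no round-up */

	/* EVNTI is a 4-bit field: clamp to [0, 15], as max(0, min(lsb, 15)) does. */
	if (lsb < 0)
		lsb = 0;
	if (lsb > 15)
		lsb = 15;

	printf("event stream divider: 2^%d = %u\n", lsb, 1U << lsb);	/* 2^12 = 4096 */
	return 0;
}

With these inputs the divider 5000 rounds down to 2^12 = 4096 (bit 11 is
clear); the max(0, ...) clamp additionally covers the case where
evt_stream_div is 0, fls() returns 0 and lsb would otherwise be -1.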
drivers/clocksource/dw_apb_timer_of.c | +39 -18
···
 #include <linux/reset.h>
 #include <linux/sched_clock.h>
 
-static void __init timer_get_base_and_rate(struct device_node *np,
+static int __init timer_get_base_and_rate(struct device_node *np,
 		void __iomem **base, u32 *rate)
 {
 	struct clk *timer_clk;
 	struct clk *pclk;
 	struct reset_control *rstc;
+	int ret;
 
 	*base = of_iomap(np, 0);
 
···
 		pr_warn("pclk for %pOFn is present, but could not be activated\n",
 			np);
 
+	if (!of_property_read_u32(np, "clock-freq", rate) &&
+	    !of_property_read_u32(np, "clock-frequency", rate))
+		return 0;
+
 	timer_clk = of_clk_get_by_name(np, "timer");
 	if (IS_ERR(timer_clk))
-		goto try_clock_freq;
+		return PTR_ERR(timer_clk);
 
-	if (!clk_prepare_enable(timer_clk)) {
-		*rate = clk_get_rate(timer_clk);
-		return;
-	}
+	ret = clk_prepare_enable(timer_clk);
+	if (ret)
+		return ret;
 
-try_clock_freq:
-	if (of_property_read_u32(np, "clock-freq", rate) &&
-	    of_property_read_u32(np, "clock-frequency", rate))
-		panic("No clock nor clock-frequency property for %pOFn", np);
+	*rate = clk_get_rate(timer_clk);
+	if (!(*rate))
+		return -EINVAL;
+
+	return 0;
 }
 
-static void __init add_clockevent(struct device_node *event_timer)
+static int __init add_clockevent(struct device_node *event_timer)
 {
 	void __iomem *iobase;
 	struct dw_apb_clock_event_device *ced;
 	u32 irq, rate;
+	int ret = 0;
 
 	irq = irq_of_parse_and_map(event_timer, 0);
 	if (irq == 0)
 		panic("No IRQ for clock event timer");
 
-	timer_get_base_and_rate(event_timer, &iobase, &rate);
+	ret = timer_get_base_and_rate(event_timer, &iobase, &rate);
+	if (ret)
+		return ret;
 
 	ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
 				     rate);
 	if (!ced)
-		panic("Unable to initialise clockevent device");
+		return -EINVAL;
 
 	dw_apb_clockevent_register(ced);
+
+	return 0;
 }
 
 static void __iomem *sched_io_base;
 static u32 sched_rate;
 
-static void __init add_clocksource(struct device_node *source_timer)
+static int __init add_clocksource(struct device_node *source_timer)
 {
 	void __iomem *iobase;
 	struct dw_apb_clocksource *cs;
 	u32 rate;
+	int ret;
 
-	timer_get_base_and_rate(source_timer, &iobase, &rate);
+	ret = timer_get_base_and_rate(source_timer, &iobase, &rate);
+	if (ret)
+		return ret;
 
 	cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
 	if (!cs)
-		panic("Unable to initialise clocksource device");
+		return -EINVAL;
 
 	dw_apb_clocksource_start(cs);
 	dw_apb_clocksource_register(cs);
···
 	 */
 	sched_io_base = iobase + 0x04;
 	sched_rate = rate;
+
+	return 0;
 }
 
 static u64 notrace read_sched_clock(void)
···
 static int num_called;
 static int __init dw_apb_timer_init(struct device_node *timer)
 {
+	int ret = 0;
+
 	switch (num_called) {
 	case 1:
 		pr_debug("%s: found clocksource timer\n", __func__);
-		add_clocksource(timer);
+		ret = add_clocksource(timer);
+		if (ret)
+			return ret;
 		init_sched_clock();
#ifdef CONFIG_ARM
 		dw_apb_delay_timer.freq = sched_rate;
···
 		break;
 	default:
 		pr_debug("%s: found clockevent timer\n", __func__);
-		add_clockevent(timer);
+		ret = add_clockevent(timer);
+		if (ret)
+			return ret;
 		break;
 	}
 
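The pattern of this change is uniform: helpers that used to panic() or return
void now hand back 0 or a negative errno, and every caller checks the result
and forwards it. A minimal standalone sketch of that shape follows;
get_rate_from_dt() and its inputs are hypothetical stand-ins, not the
driver's actual devicetree/clk calls.

/* Sketch of the error-propagation pattern introduced above. */
#include <errno.h>
#include <stdio.h>

static int get_rate_from_dt(int have_freq_property, unsigned int *rate)
{
	/* Prefer an explicit frequency property when present... */
	if (have_freq_property) {
		*rate = 33000000;	/* pretend the property said 33 MHz */
		return 0;
	}

	/* ...otherwise fail with an errno the caller can act on. */
	return -ENODEV;
}

static int timer_init(int have_freq_property)
{
	unsigned int rate;
	int ret;

	ret = get_rate_from_dt(have_freq_property, &rate);
	if (ret)
		return ret;	/* propagate instead of panic() */

	if (!rate)
		return -EINVAL;	/* a zero rate is as unusable as no rate */

	printf("timer running at %u Hz\n", rate);
	return 0;
}

int main(void)
{
	printf("with property:    %d\n", timer_init(1));	/* 0 */
	printf("without property: %d\n", timer_init(0));	/* -ENODEV (-19) */
	return 0;
}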
drivers/clocksource/sh_cmt.c | +14 -4
···
 {
 	int k, ret;
 
-	pm_runtime_get_sync(&ch->cmt->pdev->dev);
 	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
 
 	/* enable clock */
···
 	clk_disable(ch->cmt->clk);
 
 	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
-	pm_runtime_put(&ch->cmt->pdev->dev);
 }
 
 /* private flags */
···
 	int ret = 0;
 	unsigned long flags;
 
+	if (flag & FLAG_CLOCKSOURCE)
+		pm_runtime_get_sync(&ch->cmt->pdev->dev);
+
 	raw_spin_lock_irqsave(&ch->lock, flags);
 
-	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+		if (flag & FLAG_CLOCKEVENT)
+			pm_runtime_get_sync(&ch->cmt->pdev->dev);
 		ret = sh_cmt_enable(ch);
+	}
 
 	if (ret)
 		goto out;
···
 	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
 	ch->flags &= ~flag;
 
-	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
 		sh_cmt_disable(ch);
+		if (flag & FLAG_CLOCKEVENT)
+			pm_runtime_put(&ch->cmt->pdev->dev);
+	}
 
 	/* adjust the timeout to maximum if only clocksource left */
 	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
 		__sh_cmt_set_next(ch, ch->max_match_value);
 
 	raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (flag & FLAG_CLOCKSOURCE)
+		pm_runtime_put(&ch->cmt->pdev->dev);
 }
 
 static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
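The shape of the fix, stripped of driver detail: the runtime-PM calls no
longer run inside the ch->lock critical section; on the clocksource path the
reference is taken before the raw spinlock and dropped after it, so nothing
that can block executes with the lock held. Below is a rough userspace
analogue of that ordering only, with pthread mutexes standing in for both
the runtime-PM machinery and the raw spinlock; all names are illustrative,
none of this is the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;	/* may block */
static pthread_mutex_t ch_lock = PTHREAD_MUTEX_INITIALIZER;	/* "raw spinlock" */
static int pm_refcount;
static int hw_enabled;

static void pm_get(void)	/* stand-in for pm_runtime_get_sync() */
{
	pthread_mutex_lock(&pm_lock);	/* blocking: must not run under ch_lock */
	pm_refcount++;
	pthread_mutex_unlock(&pm_lock);
}

static void pm_put(void)	/* stand-in for pm_runtime_put() */
{
	pthread_mutex_lock(&pm_lock);
	pm_refcount--;
	pthread_mutex_unlock(&pm_lock);
}

static void start_channel(void)
{
	pm_get();			/* 1: take the reference outside the lock */

	pthread_mutex_lock(&ch_lock);	/* 2: only non-blocking work under it */
	hw_enabled = 1;
	pthread_mutex_unlock(&ch_lock);
}

static void stop_channel(void)
{
	pthread_mutex_lock(&ch_lock);
	hw_enabled = 0;
	pthread_mutex_unlock(&ch_lock);

	pm_put();			/* 3: drop it after the lock is released */
}

int main(void)
{
	start_channel();
	printf("enabled=%d refs=%d\n", hw_enabled, pm_refcount);	/* 1 1 */
	stop_channel();
	printf("enabled=%d refs=%d\n", hw_enabled, pm_refcount);	/* 0 0 */
	return 0;
}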