Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clocksource: pass clocksource to read() callback

Pass clocksource pointer to the read() callback for clocksources. This
allows us to share the callback between multiple instances.

[hugh@veritas.com: fix powerpc build of clocksource pass clocksource mods]
[akpm@linux-foundation.org: cleanup]
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Magnus Damm; committed by Linus Torvalds.
8e19608e ff14ed5d

+94 -79
+1 -1
arch/arm/mach-at91/at91rm9200_time.c
··· 85 85 .handler = at91rm9200_timer_interrupt 86 86 }; 87 87 88 - static cycle_t read_clk32k(void) 88 + static cycle_t read_clk32k(struct clocksource *cs) 89 89 { 90 90 return read_CRTR(); 91 91 }
+1 -1
arch/arm/mach-at91/at91sam926x_time.c
··· 31 31 * Clocksource: just a monotonic counter of MCK/16 cycles. 32 32 * We don't care whether or not PIT irqs are enabled. 33 33 */ 34 - static cycle_t read_pit_clk(void) 34 + static cycle_t read_pit_clk(struct clocksource *cs) 35 35 { 36 36 unsigned long flags; 37 37 u32 elapsed;
+1 -1
arch/arm/mach-davinci/time.c
··· 238 238 /* 239 239 * clocksource 240 240 */ 241 - static cycle_t read_cycles(void) 241 + static cycle_t read_cycles(struct clocksource *cs) 242 242 { 243 243 struct timer_s *t = &timers[TID_CLOCKSOURCE]; 244 244
+1 -1
arch/arm/mach-imx/time.c
··· 73 73 IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN; 74 74 } 75 75 76 - cycle_t imx_get_cycles(void) 76 + cycle_t imx_get_cycles(struct clocksource *cs) 77 77 { 78 78 return IMX_TCN(TIMER_BASE); 79 79 }
+1 -1
arch/arm/mach-ixp4xx/common.c
··· 401 401 /* 402 402 * clocksource 403 403 */ 404 - cycle_t ixp4xx_get_cycles(void) 404 + cycle_t ixp4xx_get_cycles(struct clocksource *cs) 405 405 { 406 406 return *IXP4XX_OSTS; 407 407 }
+2 -2
arch/arm/mach-msm/timer.c
··· 57 57 return IRQ_HANDLED; 58 58 } 59 59 60 - static cycle_t msm_gpt_read(void) 60 + static cycle_t msm_gpt_read(struct clocksource *cs) 61 61 { 62 62 return readl(MSM_GPT_BASE + TIMER_COUNT_VAL); 63 63 } 64 64 65 - static cycle_t msm_dgt_read(void) 65 + static cycle_t msm_dgt_read(struct clocksource *cs) 66 66 { 67 67 return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT; 68 68 }
+1 -1
arch/arm/mach-netx/time.c
··· 104 104 .handler = netx_timer_interrupt, 105 105 }; 106 106 107 - cycle_t netx_get_cycles(void) 107 + cycle_t netx_get_cycles(struct clocksource *cs) 108 108 { 109 109 return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE)); 110 110 }
+1 -1
arch/arm/mach-ns9xxx/time-ns9360.c
··· 25 25 #define TIMER_CLOCKEVENT 1 26 26 static u32 latch; 27 27 28 - static cycle_t ns9360_clocksource_read(void) 28 + static cycle_t ns9360_clocksource_read(struct clocksource *cs) 29 29 { 30 30 return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE)); 31 31 }
+1 -1
arch/arm/mach-omap1/time.c
··· 198 198 .handler = omap_mpu_timer2_interrupt, 199 199 }; 200 200 201 - static cycle_t mpu_read(void) 201 + static cycle_t mpu_read(struct clocksource *cs) 202 202 { 203 203 return ~omap_mpu_timer_read(1); 204 204 }
+1 -1
arch/arm/mach-omap2/timer-gp.c
··· 138 138 * clocksource 139 139 */ 140 140 static struct omap_dm_timer *gpt_clocksource; 141 - static cycle_t clocksource_read_cycles(void) 141 + static cycle_t clocksource_read_cycles(struct clocksource *cs) 142 142 { 143 143 return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource); 144 144 }
+1 -1
arch/arm/mach-pxa/time.c
··· 125 125 .set_mode = pxa_osmr0_set_mode, 126 126 }; 127 127 128 - static cycle_t pxa_read_oscr(void) 128 + static cycle_t pxa_read_oscr(struct clocksource *cs) 129 129 { 130 130 return OSCR; 131 131 }
+1 -1
arch/arm/mach-realview/core.c
··· 715 715 .handler = realview_timer_interrupt, 716 716 }; 717 717 718 - static cycle_t realview_get_cycles(void) 718 + static cycle_t realview_get_cycles(struct clocksource *cs) 719 719 { 720 720 return ~readl(timer3_va_base + TIMER_VALUE); 721 721 }
+1 -1
arch/arm/mach-versatile/core.c
··· 948 948 .handler = versatile_timer_interrupt, 949 949 }; 950 950 951 - static cycle_t versatile_get_cycles(void) 951 + static cycle_t versatile_get_cycles(struct clocksource *cs) 952 952 { 953 953 return ~readl(TIMER3_VA_BASE + TIMER_VALUE); 954 954 }
+1 -1
arch/arm/plat-mxc/time.c
··· 36 36 37 37 /* clock source */ 38 38 39 - static cycle_t mxc_get_cycles(void) 39 + static cycle_t mxc_get_cycles(struct clocksource *cs) 40 40 { 41 41 return __raw_readl(TIMER_BASE + MXC_TCN); 42 42 }
+2 -2
arch/arm/plat-omap/common.c
··· 185 185 186 186 #include <linux/clocksource.h> 187 187 188 - static cycle_t omap_32k_read(void) 188 + static cycle_t omap_32k_read(struct clocksource *cs) 189 189 { 190 190 return omap_readl(TIMER_32K_SYNCHRONIZED); 191 191 } ··· 207 207 { 208 208 unsigned long long ret; 209 209 210 - ret = (unsigned long long)omap_32k_read(); 210 + ret = (unsigned long long)omap_32k_read(&clocksource_32k); 211 211 ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; 212 212 return ret; 213 213 }
+1 -1
arch/arm/plat-orion/time.c
··· 41 41 /* 42 42 * Clocksource handling. 43 43 */ 44 - static cycle_t orion_clksrc_read(void) 44 + static cycle_t orion_clksrc_read(struct clocksource *cs) 45 45 { 46 46 return 0xffffffff - readl(TIMER0_VAL); 47 47 }
+1 -1
arch/avr32/kernel/time.c
··· 18 18 #include <mach/pm.h> 19 19 20 20 21 - static cycle_t read_cycle_count(void) 21 + static cycle_t read_cycle_count(struct clocksource *cs) 22 22 { 23 23 return (cycle_t)sysreg_read(COUNT); 24 24 }
+6 -6
arch/blackfin/kernel/time-ts.c
··· 58 58 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; 59 59 } 60 60 61 - static cycle_t read_cycles(void) 61 + static cycle_t read_cycles(struct clocksource *cs) 62 62 { 63 63 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); 64 - } 65 - 66 - unsigned long long sched_clock(void) 67 - { 68 - return cycles_2_ns(read_cycles()); 69 64 } 70 65 71 66 static struct clocksource clocksource_bfin = { ··· 71 76 .shift = 22, 72 77 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 73 78 }; 79 + 80 + unsigned long long sched_clock(void) 81 + { 82 + return cycles_2_ns(read_cycles(&clocksource_bfin)); 83 + } 74 84 75 85 static int __init bfin_clocksource_init(void) 76 86 {
+1 -1
arch/ia64/kernel/cyclone.c
··· 21 21 22 22 static void __iomem *cyclone_mc; 23 23 24 - static cycle_t read_cyclone(void) 24 + static cycle_t read_cyclone(struct clocksource *cs) 25 25 { 26 26 return (cycle_t)readq((void __iomem *)cyclone_mc); 27 27 }
+2 -2
arch/ia64/kernel/time.c
··· 33 33 34 34 #include "fsyscall_gtod_data.h" 35 35 36 - static cycle_t itc_get_cycles(void); 36 + static cycle_t itc_get_cycles(struct clocksource *cs); 37 37 38 38 struct fsyscall_gtod_data_t fsyscall_gtod_data = { 39 39 .lock = SEQLOCK_UNLOCKED, ··· 383 383 } 384 384 } 385 385 386 - static cycle_t itc_get_cycles(void) 386 + static cycle_t itc_get_cycles(struct clocksource *cs) 387 387 { 388 388 u64 lcycle, now, ret; 389 389
+1 -1
arch/ia64/sn/kernel/sn2/timer.c
··· 23 23 24 24 extern unsigned long sn_rtc_cycles_per_second; 25 25 26 - static cycle_t read_sn2(void) 26 + static cycle_t read_sn2(struct clocksource *cs) 27 27 { 28 28 return (cycle_t)readq(RTC_COUNTER_ADDR); 29 29 }
+1 -1
arch/m68knommu/platform/68328/timers.c
··· 75 75 76 76 /***************************************************************************/ 77 77 78 - static cycle_t m68328_read_clk(void) 78 + static cycle_t m68328_read_clk(struct clocksource *cs) 79 79 { 80 80 unsigned long flags; 81 81 u32 cycles;
+1 -1
arch/m68knommu/platform/coldfire/dma_timer.c
··· 34 34 #define DMA_DTMR_CLK_DIV_16 (2 << 1) 35 35 #define DMA_DTMR_ENABLE (1 << 0) 36 36 37 - static cycle_t cf_dt_get_cycles(void) 37 + static cycle_t cf_dt_get_cycles(struct clocksource *cs) 38 38 { 39 39 return __raw_readl(DTCN0); 40 40 }
+1 -1
arch/m68knommu/platform/coldfire/pit.c
··· 125 125 126 126 /***************************************************************************/ 127 127 128 - static cycle_t pit_read_clk(void) 128 + static cycle_t pit_read_clk(struct clocksource *cs) 129 129 { 130 130 unsigned long flags; 131 131 u32 cycles;
+1 -1
arch/m68knommu/platform/coldfire/timers.c
··· 78 78 79 79 /***************************************************************************/ 80 80 81 - static cycle_t mcftmr_read_clk(void) 81 + static cycle_t mcftmr_read_clk(struct clocksource *cs) 82 82 { 83 83 unsigned long flags; 84 84 u32 cycles;
+1 -1
arch/mips/kernel/cevt-txx9.c
··· 22 22 23 23 static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; 24 24 25 - static cycle_t txx9_cs_read(void) 25 + static cycle_t txx9_cs_read(struct clocksource *cs) 26 26 { 27 27 return __raw_readl(&txx9_cs_tmrptr->trr); 28 28 }
+1 -1
arch/mips/kernel/csrc-bcm1480.c
··· 28 28 29 29 #include <asm/sibyte/sb1250.h> 30 30 31 - static cycle_t bcm1480_hpt_read(void) 31 + static cycle_t bcm1480_hpt_read(struct clocksource *cs) 32 32 { 33 33 return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); 34 34 }
+3 -3
arch/mips/kernel/csrc-ioasic.c
··· 25 25 #include <asm/dec/ioasic.h> 26 26 #include <asm/dec/ioasic_addrs.h> 27 27 28 - static cycle_t dec_ioasic_hpt_read(void) 28 + static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) 29 29 { 30 30 return ioasic_read(IO_REG_FCTR); 31 31 } ··· 47 47 while (!ds1287_timer_state()) 48 48 ; 49 49 50 - start = dec_ioasic_hpt_read(); 50 + start = dec_ioasic_hpt_read(&clocksource_dec); 51 51 52 52 while (i--) 53 53 while (!ds1287_timer_state()) 54 54 ; 55 55 56 - end = dec_ioasic_hpt_read(); 56 + end = dec_ioasic_hpt_read(&clocksource_dec); 57 57 58 58 freq = (end - start) * 10; 59 59 printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+1 -1
arch/mips/kernel/csrc-r4k.c
··· 10 10 11 11 #include <asm/time.h> 12 12 13 - static cycle_t c0_hpt_read(void) 13 + static cycle_t c0_hpt_read(struct clocksource *cs) 14 14 { 15 15 return read_c0_count(); 16 16 }
+1 -1
arch/mips/kernel/csrc-sb1250.c
··· 33 33 * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over 34 34 * again. 35 35 */ 36 - static cycle_t sb1250_hpt_read(void) 36 + static cycle_t sb1250_hpt_read(struct clocksource *cs) 37 37 { 38 38 unsigned int count; 39 39
+1 -1
arch/mips/kernel/i8253.c
··· 128 128 * to just read by itself. So use jiffies to emulate a free 129 129 * running counter: 130 130 */ 131 - static cycle_t pit_read(void) 131 + static cycle_t pit_read(struct clocksource *cs) 132 132 { 133 133 unsigned long flags; 134 134 int count;
+1 -1
arch/mips/nxp/pnx8550/common/time.c
··· 35 35 36 36 static unsigned long cpj; 37 37 38 - static cycle_t hpt_read(void) 38 + static cycle_t hpt_read(struct clocksource *cs) 39 39 { 40 40 return read_c0_count2(); 41 41 }
+1 -1
arch/mips/sgi-ip27/ip27-timer.c
··· 159 159 setup_irq(irq, &hub_rt_irqaction); 160 160 } 161 161 162 - static cycle_t hub_rt_read(void) 162 + static cycle_t hub_rt_read(struct clocksource *cs) 163 163 { 164 164 return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); 165 165 }
+4 -4
arch/powerpc/kernel/time.c
··· 77 77 #include <linux/clockchips.h> 78 78 #include <linux/clocksource.h> 79 79 80 - static cycle_t rtc_read(void); 80 + static cycle_t rtc_read(struct clocksource *); 81 81 static struct clocksource clocksource_rtc = { 82 82 .name = "rtc", 83 83 .rating = 400, ··· 88 88 .read = rtc_read, 89 89 }; 90 90 91 - static cycle_t timebase_read(void); 91 + static cycle_t timebase_read(struct clocksource *); 92 92 static struct clocksource clocksource_timebase = { 93 93 .name = "timebase", 94 94 .rating = 400, ··· 766 766 } 767 767 768 768 /* clocksource code */ 769 - static cycle_t rtc_read(void) 769 + static cycle_t rtc_read(struct clocksource *cs) 770 770 { 771 771 return (cycle_t)get_rtc(); 772 772 } 773 773 774 - static cycle_t timebase_read(void) 774 + static cycle_t timebase_read(struct clocksource *cs) 775 775 { 776 776 return (cycle_t)get_tb(); 777 777 }
+1 -1
arch/s390/kernel/time.c
··· 201 201 return ts.tv_sec; 202 202 } 203 203 204 - static cycle_t read_tod_clock(void) 204 + static cycle_t read_tod_clock(struct clocksource *cs) 205 205 { 206 206 return get_clock(); 207 207 }
+1 -1
arch/sh/kernel/time_32.c
··· 208 208 if (!clocksource_sh.rating) 209 209 return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); 210 210 211 - cycles = clocksource_sh.read(); 211 + cycles = clocksource_sh.read(&clocksource_sh); 212 212 return cyc2ns(&clocksource_sh, cycles); 213 213 } 214 214 #endif
+1 -1
arch/sh/kernel/timers/timer-tmu.c
··· 81 81 */ 82 82 static int tmus_are_scaled; 83 83 84 - static cycle_t tmu_timer_read(void) 84 + static cycle_t tmu_timer_read(struct clocksource *cs) 85 85 { 86 86 return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled; 87 87 }
+6 -1
arch/sparc/kernel/time_64.c
··· 814 814 } 815 815 EXPORT_SYMBOL(udelay); 816 816 817 + static cycle_t clocksource_tick_read(struct clocksource *cs) 818 + { 819 + return tick_ops->get_tick(); 820 + } 821 + 817 822 void __init time_init(void) 818 823 { 819 824 unsigned long freq = sparc64_init_timers(); ··· 832 827 clocksource_tick.mult = 833 828 clocksource_hz2mult(freq, 834 829 clocksource_tick.shift); 835 - clocksource_tick.read = tick_ops->get_tick; 830 + clocksource_tick.read = clocksource_tick_read; 836 831 837 832 printk("clocksource: mult[%x] shift[%d]\n", 838 833 clocksource_tick.mult, clocksource_tick.shift);
+1 -1
arch/um/kernel/time.c
··· 65 65 return IRQ_HANDLED; 66 66 } 67 67 68 - static cycle_t itimer_read(void) 68 + static cycle_t itimer_read(struct clocksource *cs) 69 69 { 70 70 return os_nsecs() / 1000; 71 71 }
+3 -3
arch/x86/kernel/hpet.c
··· 722 722 /* 723 723 * Clock source related code 724 724 */ 725 - static cycle_t read_hpet(void) 725 + static cycle_t read_hpet(struct clocksource *cs) 726 726 { 727 727 return (cycle_t)hpet_readl(HPET_COUNTER); 728 728 } ··· 756 756 hpet_restart_counter(); 757 757 758 758 /* Verify whether hpet counter works */ 759 - t1 = read_hpet(); 759 + t1 = hpet_readl(HPET_COUNTER); 760 760 rdtscll(start); 761 761 762 762 /* ··· 770 770 rdtscll(now); 771 771 } while ((now - start) < 200000UL); 772 772 773 - if (t1 == read_hpet()) { 773 + if (t1 == hpet_readl(HPET_COUNTER)) { 774 774 printk(KERN_WARNING 775 775 "HPET counter not counting. HPET disabled\n"); 776 776 return -ENODEV;
+1 -1
arch/x86/kernel/i8253.c
··· 129 129 * to just read by itself. So use jiffies to emulate a free 130 130 * running counter: 131 131 */ 132 - static cycle_t pit_read(void) 132 + static cycle_t pit_read(struct clocksource *cs) 133 133 { 134 134 static int old_count; 135 135 static u32 old_jifs;
+6 -1
arch/x86/kernel/kvmclock.c
··· 77 77 return ret; 78 78 } 79 79 80 + static cycle_t kvm_clock_get_cycles(struct clocksource *cs) 81 + { 82 + return kvm_clock_read(); 83 + } 84 + 80 85 /* 81 86 * If we don't do that, there is the possibility that the guest 82 87 * will calibrate under heavy load - thus, getting a lower lpj - ··· 112 107 113 108 static struct clocksource kvm_clock = { 114 109 .name = "kvm-clock", 115 - .read = kvm_clock_read, 110 + .read = kvm_clock_get_cycles, 116 111 .rating = 400, 117 112 .mask = CLOCKSOURCE_MASK(64), 118 113 .mult = 1 << KVM_SCALE,
+1 -1
arch/x86/kernel/tsc.c
··· 699 699 * code, which is necessary to support wrapping clocksources like pm 700 700 * timer. 701 701 */ 702 - static cycle_t read_tsc(void) 702 + static cycle_t read_tsc(struct clocksource *cs) 703 703 { 704 704 cycle_t ret = (cycle_t)get_cycles(); 705 705
+1 -1
arch/x86/kernel/vmiclock_32.c
··· 283 283 /** vmi clocksource */ 284 284 static struct clocksource clocksource_vmi; 285 285 286 - static cycle_t read_real_cycles(void) 286 + static cycle_t read_real_cycles(struct clocksource *cs) 287 287 { 288 288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 289 289 return max(ret, clocksource_vmi.cycle_last);
+1 -1
arch/x86/lguest/boot.c
··· 663 663 664 664 /* If we can't use the TSC, the kernel falls back to our lower-priority 665 665 * "lguest_clock", where we read the time value given to us by the Host. */ 666 - static cycle_t lguest_clock_read(void) 666 + static cycle_t lguest_clock_read(struct clocksource *cs) 667 667 { 668 668 unsigned long sec, nsec; 669 669
+6 -1
arch/x86/xen/time.c
··· 213 213 return ret; 214 214 } 215 215 216 + static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) 217 + { 218 + return xen_clocksource_read(); 219 + } 220 + 216 221 static void xen_read_wallclock(struct timespec *ts) 217 222 { 218 223 struct shared_info *s = HYPERVISOR_shared_info; ··· 246 241 static struct clocksource xen_clocksource __read_mostly = { 247 242 .name = "xen", 248 243 .rating = 400, 249 - .read = xen_clocksource_read, 244 + .read = xen_clocksource_get_cycles, 250 245 .mask = ~0, 251 246 .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ 252 247 .shift = XEN_SHIFT,
+1 -1
drivers/char/hpet.c
··· 72 72 #ifdef CONFIG_IA64 73 73 static void __iomem *hpet_mctr; 74 74 75 - static cycle_t read_hpet(void) 75 + static cycle_t read_hpet(struct clocksource *cs) 76 76 { 77 77 return (cycle_t)read_counter((void __iomem *)hpet_mctr); 78 78 }
+6 -6
drivers/clocksource/acpi_pm.c
··· 57 57 return v2; 58 58 } 59 59 60 - static cycle_t acpi_pm_read(void) 60 + static cycle_t acpi_pm_read(struct clocksource *cs) 61 61 { 62 62 return (cycle_t)read_pmtmr(); 63 63 } ··· 83 83 } 84 84 __setup("acpi_pm_good", acpi_pm_good_setup); 85 85 86 - static cycle_t acpi_pm_read_slow(void) 86 + static cycle_t acpi_pm_read_slow(struct clocksource *cs) 87 87 { 88 88 return (cycle_t)acpi_pm_read_verified(); 89 89 } ··· 156 156 unsigned long count, delta; 157 157 158 158 mach_prepare_counter(); 159 - value1 = clocksource_acpi_pm.read(); 159 + value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 160 160 mach_countup(&count); 161 - value2 = clocksource_acpi_pm.read(); 161 + value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 162 162 delta = (value2 - value1) & ACPI_PM_MASK; 163 163 164 164 /* Check that the PMTMR delta is within 5% of what we expect */ ··· 195 195 /* "verify" this timing source: */ 196 196 for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) { 197 197 udelay(100 * j); 198 - value1 = clocksource_acpi_pm.read(); 198 + value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 199 199 for (i = 0; i < ACPI_PM_READ_CHECKS; i++) { 200 - value2 = clocksource_acpi_pm.read(); 200 + value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 201 201 if (value2 == value1) 202 202 continue; 203 203 if (value2 > value1)
+1 -1
drivers/clocksource/cyclone.c
··· 19 19 int use_cyclone = 0; 20 20 static void __iomem *cyclone_ptr; 21 21 22 - static cycle_t read_cyclone(void) 22 + static cycle_t read_cyclone(struct clocksource *cs) 23 23 { 24 24 return (cycle_t)readl(cyclone_ptr); 25 25 }
+1 -1
drivers/clocksource/scx200_hrt.c
··· 43 43 /* The base timer frequency, * 27 if selected */ 44 44 #define HRT_FREQ 1000000 45 45 46 - static cycle_t read_hrt(void) 46 + static cycle_t read_hrt(struct clocksource *cs) 47 47 { 48 48 /* Read the timer value */ 49 49 return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
+1 -1
drivers/clocksource/tcb_clksrc.c
··· 39 39 40 40 static void __iomem *tcaddr; 41 41 42 - static cycle_t tc_get_cycles(void) 42 + static cycle_t tc_get_cycles(struct clocksource *cs) 43 43 { 44 44 unsigned long flags; 45 45 u32 lower, upper;
+3 -3
include/linux/clocksource.h
··· 143 143 * 400-499: Perfect 144 144 * The ideal clocksource. A must-use where 145 145 * available. 146 - * @read: returns a cycle value 146 + * @read: returns a cycle value, passes clocksource as argument 147 147 * @mask: bitmask for two's complement 148 148 * subtraction of non 64 bit counters 149 149 * @mult: cycle to nanosecond multiplier (adjusted by NTP) ··· 162 162 char *name; 163 163 struct list_head list; 164 164 int rating; 165 - cycle_t (*read)(void); 165 + cycle_t (*read)(struct clocksource *cs); 166 166 cycle_t mask; 167 167 u32 mult; 168 168 u32 mult_orig; ··· 271 271 */ 272 272 static inline cycle_t clocksource_read(struct clocksource *cs) 273 273 { 274 - return cs->read(); 274 + return cs->read(cs); 275 275 } 276 276 277 277 /**
+4 -4
kernel/time/clocksource.c
··· 181 181 182 182 resumed = test_and_clear_bit(0, &watchdog_resumed); 183 183 184 - wdnow = watchdog->read(); 184 + wdnow = watchdog->read(watchdog); 185 185 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); 186 186 watchdog_last = wdnow; 187 187 188 188 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { 189 - csnow = cs->read(); 189 + csnow = cs->read(cs); 190 190 191 191 if (unlikely(resumed)) { 192 192 cs->wd_last = csnow; ··· 247 247 248 248 list_add(&cs->wd_list, &watchdog_list); 249 249 if (!started && watchdog) { 250 - watchdog_last = watchdog->read(); 250 + watchdog_last = watchdog->read(watchdog); 251 251 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 252 252 add_timer_on(&watchdog_timer, 253 253 cpumask_first(cpu_online_mask)); ··· 268 268 cse->flags &= ~CLOCK_SOURCE_WATCHDOG; 269 269 /* Start if list is not empty */ 270 270 if (!list_empty(&watchdog_list)) { 271 - watchdog_last = watchdog->read(); 271 + watchdog_last = watchdog->read(watchdog); 272 272 watchdog_timer.expires = 273 273 jiffies + WATCHDOG_INTERVAL; 274 274 add_timer_on(&watchdog_timer,
+1 -1
kernel/time/jiffies.c
··· 50 50 */ 51 51 #define JIFFIES_SHIFT 8 52 52 53 - static cycle_t jiffies_read(void) 53 + static cycle_t jiffies_read(struct clocksource *cs) 54 54 { 55 55 return (cycle_t) jiffies; 56 56 }