Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-clocksource-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-clocksource-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
clocksource: convert mips to generic i8253 clocksource
clocksource: convert x86 to generic i8253 clocksource
clocksource: convert footbridge to generic i8253 clocksource
clocksource: add common i8253 PIT clocksource
blackfin: convert to clocksource_register_hz
mips: convert to clocksource_register_hz/khz
sparc: convert to clocksource_register_hz/khz
alpha: convert to clocksource_register_hz
microblaze: convert to clocksource_register_hz/khz
ia64: convert to clocksource_register_hz/khz
x86: Convert remaining x86 clocksources to clocksource_register_hz/khz
Make clocksource name const

+157 -355
+15
arch/arm/include/asm/i8253.h
··· 1 + #ifndef __ASMARM_I8253_H 2 + #define __ASMARM_I8253_H 3 + 4 + /* i8253A PIT registers */ 5 + #define PIT_MODE 0x43 6 + #define PIT_CH0 0x40 7 + 8 + #define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) 9 + 10 + extern raw_spinlock_t i8253_lock; 11 + 12 + #define outb_pit outb_p 13 + #define inb_pit inb_p 14 + 15 + #endif
+2
arch/arm/mach-footbridge/Kconfig
··· 4 4 5 5 config ARCH_CATS 6 6 bool "CATS" 7 + select CLKSRC_I8253 7 8 select FOOTBRIDGE_HOST 8 9 select ISA 9 10 select ISA_DMA ··· 60 59 61 60 config ARCH_NETWINDER 62 61 bool "NetWinder" 62 + select CLKSRC_I8253 63 63 select FOOTBRIDGE_HOST 64 64 select ISA 65 65 select ISA_DMA
+4 -41
arch/arm/mach-footbridge/isa-timer.c
··· 10 10 #include <linux/interrupt.h> 11 11 #include <linux/irq.h> 12 12 #include <linux/io.h> 13 + #include <linux/spinlock.h> 13 14 #include <linux/timex.h> 14 15 15 16 #include <asm/irq.h> 16 - 17 + #include <asm/i8253.h> 17 18 #include <asm/mach/time.h> 18 19 19 20 #include "common.h" 20 21 21 - #define PIT_MODE 0x43 22 - #define PIT_CH0 0x40 23 - 24 - #define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) 25 - 26 - static cycle_t pit_read(struct clocksource *cs) 27 - { 28 - unsigned long flags; 29 - static int old_count; 30 - static u32 old_jifs; 31 - int count; 32 - u32 jifs; 33 - 34 - raw_local_irq_save(flags); 35 - 36 - jifs = jiffies; 37 - outb_p(0x00, PIT_MODE); /* latch the count */ 38 - count = inb_p(PIT_CH0); /* read the latched count */ 39 - count |= inb_p(PIT_CH0) << 8; 40 - 41 - if (count > old_count && jifs == old_jifs) 42 - count = old_count; 43 - 44 - old_count = count; 45 - old_jifs = jifs; 46 - 47 - raw_local_irq_restore(flags); 48 - 49 - count = (PIT_LATCH - 1) - count; 50 - 51 - return (cycle_t)(jifs * PIT_LATCH) + count; 52 - } 53 - 54 - static struct clocksource pit_cs = { 55 - .name = "pit", 56 - .rating = 110, 57 - .read = pit_read, 58 - .mask = CLOCKSOURCE_MASK(32), 59 - }; 22 + DEFINE_RAW_SPINLOCK(i8253_lock); 60 23 61 24 static void pit_set_mode(enum clock_event_mode mode, 62 25 struct clock_event_device *evt) ··· 84 121 pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); 85 122 pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); 86 123 87 - clocksource_register_hz(&pit_cs, PIT_TICK_RATE); 124 + clocksource_i8253_init(); 88 125 89 126 setup_irq(pit_ce.irq, &pit_timer_irq); 90 127 clockevents_register_device(&pit_ce);
+2 -33
arch/blackfin/kernel/time-ts.c
··· 23 23 #include <asm/gptimers.h> 24 24 #include <asm/nmi.h> 25 25 26 - /* Accelerators for sched_clock() 27 - * convert from cycles(64bits) => nanoseconds (64bits) 28 - * basic equation: 29 - * ns = cycles / (freq / ns_per_sec) 30 - * ns = cycles * (ns_per_sec / freq) 31 - * ns = cycles * (10^9 / (cpu_khz * 10^3)) 32 - * ns = cycles * (10^6 / cpu_khz) 33 - * 34 - * Then we use scaling math (suggested by george@mvista.com) to get: 35 - * ns = cycles * (10^6 * SC / cpu_khz) / SC 36 - * ns = cycles * cyc2ns_scale / SC 37 - * 38 - * And since SC is a constant power of two, we can convert the div 39 - * into a shift. 40 - * 41 - * We can use khz divisor instead of mhz to keep a better precision, since 42 - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. 43 - * (mathieu.desnoyers@polymtl.ca) 44 - * 45 - * -johnstul@us.ibm.com "math is hard, lets go shopping!" 46 - */ 47 - 48 - #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 49 26 50 27 #if defined(CONFIG_CYCLES_CLOCKSOURCE) 51 28 ··· 40 63 .rating = 400, 41 64 .read = bfin_read_cycles, 42 65 .mask = CLOCKSOURCE_MASK(64), 43 - .shift = CYC2NS_SCALE_FACTOR, 44 66 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 45 67 }; 46 68 ··· 51 75 52 76 static int __init bfin_cs_cycles_init(void) 53 77 { 54 - bfin_cs_cycles.mult = \ 55 - clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); 56 - 57 - if (clocksource_register(&bfin_cs_cycles)) 78 + if (clocksource_register_hz(&bfin_cs_cycles, get_cclk())) 58 79 panic("failed to register clocksource"); 59 80 60 81 return 0; ··· 84 111 .rating = 350, 85 112 .read = bfin_read_gptimer0, 86 113 .mask = CLOCKSOURCE_MASK(32), 87 - .shift = CYC2NS_SCALE_FACTOR, 88 114 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 89 115 }; 90 116 ··· 97 125 { 98 126 setup_gptimer0(); 99 127 100 - bfin_cs_gptimer0.mult = \ 101 - clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); 102 - 103 - if (clocksource_register(&bfin_cs_gptimer0)) 128 + if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk())) 104 129 panic("failed to register clocksource"); 105 130 106 131 return 0;
+1 -5
arch/ia64/kernel/cyclone.c
··· 31 31 .rating = 300, 32 32 .read = read_cyclone, 33 33 .mask = (1LL << 40) - 1, 34 - .mult = 0, /*to be calculated*/ 35 - .shift = 16, 36 34 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 37 35 }; 38 36 ··· 116 118 /* initialize last tick */ 117 119 cyclone_mc = cyclone_timer; 118 120 clocksource_cyclone.fsys_mmio = cyclone_timer; 119 - clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, 120 - clocksource_cyclone.shift); 121 - clocksource_register(&clocksource_cyclone); 121 + clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); 122 122 123 123 return 0; 124 124 }
+2 -7
arch/ia64/kernel/time.c
··· 73 73 .rating = 350, 74 74 .read = itc_get_cycles, 75 75 .mask = CLOCKSOURCE_MASK(64), 76 - .mult = 0, /*to be calculated*/ 77 - .shift = 16, 78 76 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 79 77 #ifdef CONFIG_PARAVIRT 80 78 .resume = paravirt_clocksource_resume, ··· 363 365 ia64_cpu_local_tick(); 364 366 365 367 if (!itc_clocksource) { 366 - /* Sort out mult/shift values: */ 367 - clocksource_itc.mult = 368 - clocksource_hz2mult(local_cpu_data->itc_freq, 369 - clocksource_itc.shift); 370 - clocksource_register(&clocksource_itc); 368 + clocksource_register_hz(&clocksource_itc, 369 + local_cpu_data->itc_freq); 371 370 itc_clocksource = &clocksource_itc; 372 371 } 373 372 }
+1 -5
arch/ia64/sn/kernel/sn2/timer.c
··· 33 33 .rating = 450, 34 34 .read = read_sn2, 35 35 .mask = (1LL << 55) - 1, 36 - .mult = 0, 37 - .shift = 10, 38 36 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 39 37 }; 40 38 ··· 55 57 void __init sn_timer_init(void) 56 58 { 57 59 clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; 58 - clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, 59 - clocksource_sn2.shift); 60 - clocksource_register(&clocksource_sn2); 60 + clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); 61 61 62 62 ia64_udelay = &ia64_sn_udelay; 63 63 }
+1 -5
arch/microblaze/kernel/timer.c
··· 217 217 .rating = 300, 218 218 .read = microblaze_read, 219 219 .mask = CLOCKSOURCE_MASK(32), 220 - .shift = 8, /* I can shift it */ 221 220 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 222 221 }; 223 222 224 223 static int __init microblaze_clocksource_init(void) 225 224 { 226 - clocksource_microblaze.mult = 227 - clocksource_hz2mult(timer_clock_freq, 228 - clocksource_microblaze.shift); 229 - if (clocksource_register(&clocksource_microblaze)) 225 + if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) 230 226 panic("failed to register clocksource"); 231 227 232 228 /* stop timer1 */
+1
arch/mips/Kconfig
··· 2395 2395 2396 2396 config I8253 2397 2397 bool 2398 + select CLKSRC_I8253 2398 2399 select MIPS_EXTERNAL_TIMER 2399 2400 2400 2401 config ZONE_DMA32
+1 -2
arch/mips/alchemy/common/time.c
··· 141 141 goto cntr_err; 142 142 143 143 /* register counter1 clocksource and event device */ 144 - clocksource_set_clock(&au1x_counter1_clocksource, 32768); 145 - clocksource_register(&au1x_counter1_clocksource); 144 + clocksource_register_hz(&au1x_counter1_clocksource, 32768); 146 145 147 146 cd->shift = 32; 148 147 cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+1 -2
arch/mips/cavium-octeon/csrc-octeon.c
··· 105 105 void __init plat_time_init(void) 106 106 { 107 107 clocksource_mips.rating = 300; 108 - clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate()); 109 - clocksource_register(&clocksource_mips); 108 + clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate()); 110 109 } 111 110 112 111 static u64 octeon_udelay_factor;
+5
arch/mips/include/asm/i8253.h
··· 12 12 #define PIT_CH0 0x40 13 13 #define PIT_CH2 0x42 14 14 15 + #define PIT_LATCH LATCH 16 + 15 17 extern raw_spinlock_t i8253_lock; 16 18 17 19 extern void setup_pit_timer(void); 20 + 21 + #define inb_pit inb_p 22 + #define outb_pit outb_p 18 23 19 24 #endif /* __ASM_I8253_H */
-6
arch/mips/include/asm/time.h
··· 84 84 #endif 85 85 } 86 86 87 - static inline void clocksource_set_clock(struct clocksource *cs, 88 - unsigned int clock) 89 - { 90 - clocksource_calc_mult_shift(cs, clock, 4); 91 - } 92 - 93 87 static inline void clockevent_set_clock(struct clock_event_device *cd, 94 88 unsigned int clock) 95 89 {
+1 -2
arch/mips/jz4740/time.c
··· 121 121 122 122 clockevents_register_device(&jz4740_clockevent); 123 123 124 - clocksource_set_clock(&jz4740_clocksource, clk_rate); 125 - ret = clocksource_register(&jz4740_clocksource); 124 + ret = clocksource_register_hz(&jz4740_clocksource, clk_rate); 126 125 127 126 if (ret) 128 127 printk(KERN_ERR "Failed to register clocksource: %d\n", ret);
+1 -2
arch/mips/kernel/cevt-txx9.c
··· 51 51 { 52 52 struct txx9_tmr_reg __iomem *tmrptr; 53 53 54 - clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); 55 - clocksource_register(&txx9_clocksource.cs); 54 + clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); 56 55 57 56 tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); 58 57 __raw_writel(TCR_BASE, &tmrptr->tcr);
+1 -2
arch/mips/kernel/csrc-bcm1480.c
··· 49 49 50 50 plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); 51 51 zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); 52 - clocksource_set_clock(cs, zbbus); 53 - clocksource_register(cs); 52 + clocksource_register_hz(cs, zbbus); 54 53 }
+1 -3
arch/mips/kernel/csrc-ioasic.c
··· 59 59 printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); 60 60 61 61 clocksource_dec.rating = 200 + freq / 10000000; 62 - clocksource_set_clock(&clocksource_dec, freq); 63 - 64 - clocksource_register(&clocksource_dec); 62 + clocksource_register_hz(&clocksource_dec, freq); 65 63 }
+3 -32
arch/mips/kernel/csrc-powertv.c
··· 78 78 79 79 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; 80 80 81 - clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); 82 - 83 - clocksource_register(&clocksource_mips); 81 + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); 84 82 } 85 83 86 84 /** ··· 128 130 /** 129 131 * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock 130 132 * 131 - * The hard part here is coming up with a constant k and shift s such that 132 - * the 48-bit TIM_C value multiplied by k doesn't overflow and that value, 133 - * when shifted right by s, yields the corresponding number of nanoseconds. 134 133 * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to 135 - * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the 136 - * number of nanoseconds. Since the TIM_C value has 48 bits and the math is 137 - * done in 64 bits, avoiding an overflow means that k must be less than 138 - * 64 - 48 = 16 bits. 134 + * 1 / (27,000,000/8) seconds. 139 135 */ 140 136 static void __init powertv_tim_c_clocksource_init(void) 141 137 { 142 - int prescale; 143 - unsigned long dividend; 144 - unsigned long k; 145 - int s; 146 - const int max_k_bits = (64 - 48) - 1; 147 - const unsigned long billion = 1000000000; 148 138 const unsigned long counts_per_second = 27000000 / 8; 149 139 150 - prescale = BITS_PER_LONG - ilog2(billion) - 1; 151 - dividend = billion << prescale; 152 - k = dividend / counts_per_second; 153 - s = ilog2(k) - max_k_bits; 154 - 155 - if (s < 0) 156 - s = prescale; 157 - 158 - else { 159 - k >>= s; 160 - s += prescale; 161 - } 162 - 163 - clocksource_tim_c.mult = k; 164 - clocksource_tim_c.shift = s; 165 140 clocksource_tim_c.rating = 200; 166 141 167 - clocksource_register(&clocksource_tim_c); 142 + clocksource_register_hz(&clocksource_tim_c, counts_per_second); 168 143 tim_c = (struct tim_c *) asic_reg_addr(tim_ch); 169 144 } 170 145
+1 -3
arch/mips/kernel/csrc-r4k.c
··· 30 30 /* Calculate a somewhat reasonable rating value */ 31 31 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; 32 32 33 - clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); 34 - 35 - clocksource_register(&clocksource_mips); 33 + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); 36 34 37 35 return 0; 38 36 }
+1 -2
arch/mips/kernel/csrc-sb1250.c
··· 65 65 IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, 66 66 R_SCD_TIMER_CFG))); 67 67 68 - clocksource_set_clock(cs, V_SCD_TIMER_FREQ); 69 - clocksource_register(cs); 68 + clocksource_register_hz(cs, V_SCD_TIMER_FREQ); 70 69 }
+1 -77
arch/mips/kernel/i8253.c
··· 125 125 setup_irq(0, &irq0); 126 126 } 127 127 128 - /* 129 - * Since the PIT overflows every tick, its not very useful 130 - * to just read by itself. So use jiffies to emulate a free 131 - * running counter: 132 - */ 133 - static cycle_t pit_read(struct clocksource *cs) 134 - { 135 - unsigned long flags; 136 - int count; 137 - u32 jifs; 138 - static int old_count; 139 - static u32 old_jifs; 140 - 141 - raw_spin_lock_irqsave(&i8253_lock, flags); 142 - /* 143 - * Although our caller may have the read side of xtime_lock, 144 - * this is now a seqlock, and we are cheating in this routine 145 - * by having side effects on state that we cannot undo if 146 - * there is a collision on the seqlock and our caller has to 147 - * retry. (Namely, old_jifs and old_count.) So we must treat 148 - * jiffies as volatile despite the lock. We read jiffies 149 - * before latching the timer count to guarantee that although 150 - * the jiffies value might be older than the count (that is, 151 - * the counter may underflow between the last point where 152 - * jiffies was incremented and the point where we latch the 153 - * count), it cannot be newer. 154 - */ 155 - jifs = jiffies; 156 - outb_p(0x00, PIT_MODE); /* latch the count ASAP */ 157 - count = inb_p(PIT_CH0); /* read the latched count */ 158 - count |= inb_p(PIT_CH0) << 8; 159 - 160 - /* VIA686a test code... reset the latch if count > max + 1 */ 161 - if (count > LATCH) { 162 - outb_p(0x34, PIT_MODE); 163 - outb_p(LATCH & 0xff, PIT_CH0); 164 - outb(LATCH >> 8, PIT_CH0); 165 - count = LATCH - 1; 166 - } 167 - 168 - /* 169 - * It's possible for count to appear to go the wrong way for a 170 - * couple of reasons: 171 - * 172 - * 1. The timer counter underflows, but we haven't handled the 173 - * resulting interrupt and incremented jiffies yet. 174 - * 2. Hardware problem with the timer, not giving us continuous time, 175 - * the counter does small "jumps" upwards on some Pentium systems, 176 - * (see c't 95/10 page 335 for Neptun bug.) 177 - * 178 - * Previous attempts to handle these cases intelligently were 179 - * buggy, so we just do the simple thing now. 180 - */ 181 - if (count > old_count && jifs == old_jifs) { 182 - count = old_count; 183 - } 184 - old_count = count; 185 - old_jifs = jifs; 186 - 187 - raw_spin_unlock_irqrestore(&i8253_lock, flags); 188 - 189 - count = (LATCH - 1) - count; 190 - 191 - return (cycle_t)(jifs * LATCH) + count; 192 - }
+1 -4
arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
··· 201 201 .rating = 120, /* Functional for real use, but not desired */ 202 202 .read = mfgpt_read, 203 203 .mask = CLOCKSOURCE_MASK(32), 204 - .mult = 0, 205 - .shift = 22, 206 204 }; 207 205 208 206 int __init init_mfgpt_clocksource(void) ··· 208 210 if (num_possible_cpus() > 1) /* MFGPT does not scale! */ 209 211 return 0; 210 212 211 - clocksource_mfgpt.mult = clocksource_hz2mult(MFGPT_TICK_RATE, 22); 212 - return clocksource_register(&clocksource_mfgpt); 213 + return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE); 213 214 } 214 215 215 216 arch_initcall(init_mfgpt_clocksource);
+1 -2
arch/mips/sgi-ip27/ip27-timer.c
··· 163 163 { 164 164 struct clocksource *cs = &hub_rt_clocksource; 165 165 166 - clocksource_set_clock(cs, CYCLES_PER_SEC); 167 - clocksource_register(cs); 166 + clocksource_register_hz(cs, CYCLES_PER_SEC); 168 167 } 169 168 170 169 void __init plat_time_init(void)
+1
arch/x86/Kconfig
··· 8 8 9 9 config X86_32 10 10 def_bool !64BIT 11 + select CLKSRC_I8253 11 12 12 13 config X86_64 13 14 def_bool 64BIT
+2
arch/x86/include/asm/i8253.h
··· 6 6 #define PIT_CH0 0x40 7 7 #define PIT_CH2 0x42 8 8 9 + #define PIT_LATCH LATCH 10 + 9 11 extern raw_spinlock_t i8253_lock; 10 12 11 13 extern struct clock_event_device *global_clock_event;
+1 -9
arch/x86/kernel/apb_timer.c
··· 177 177 .rating = APBT_CLOCKSOURCE_RATING, 178 178 .read = apbt_read_clocksource, 179 179 .mask = APBT_MASK, 180 - .shift = APBT_SHIFT, 181 180 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 182 181 .resume = apbt_restart_clocksource, 183 182 }; ··· 542 543 if (t1 == apbt_read_clocksource(&clocksource_apbt)) 543 544 panic("APBT counter not counting. APBT disabled\n"); 544 545 545 - /* 546 - * initialize and register APBT clocksource 547 - * convert that to ns/clock cycle 548 - * mult = (ns/c) * 2^APBT_SHIFT 549 - */ 550 - clocksource_apbt.mult = div_sc(MSEC_PER_SEC, 551 - (unsigned long) apbt_freq, APBT_SHIFT); 552 - clocksource_register(&clocksource_apbt); 546 + clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000); 553 547 554 548 return 0; 555 549 }
+1 -79
arch/x86/kernel/i8253.c
··· 117 117 } 118 118 119 119 #ifndef CONFIG_X86_64 120 - /* 121 - * Since the PIT overflows every tick, its not very useful 122 - * to just read by itself. So use jiffies to emulate a free 123 - * running counter: 124 - */ 125 - static cycle_t pit_read(struct clocksource *cs) 126 - { 127 - static int old_count; 128 - static u32 old_jifs; 129 - unsigned long flags; 130 - int count; 131 - u32 jifs; 132 - 133 - raw_spin_lock_irqsave(&i8253_lock, flags); 134 - /* 135 - * Although our caller may have the read side of xtime_lock, 136 - * this is now a seqlock, and we are cheating in this routine 137 - * by having side effects on state that we cannot undo if 138 - * there is a collision on the seqlock and our caller has to 139 - * retry. (Namely, old_jifs and old_count.) So we must treat 140 - * jiffies as volatile despite the lock. We read jiffies 141 - * before latching the timer count to guarantee that although 142 - * the jiffies value might be older than the count (that is, 143 - * the counter may underflow between the last point where 144 - * jiffies was incremented and the point where we latch the 145 - * count), it cannot be newer. 146 - */ 147 - jifs = jiffies; 148 - outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ 149 - count = inb_pit(PIT_CH0); /* read the latched count */ 150 - count |= inb_pit(PIT_CH0) << 8; 151 - 152 - /* VIA686a test code... reset the latch if count > max + 1 */ 153 - if (count > LATCH) { 154 - outb_pit(0x34, PIT_MODE); 155 - outb_pit(LATCH & 0xff, PIT_CH0); 156 - outb_pit(LATCH >> 8, PIT_CH0); 157 - count = LATCH - 1; 158 - } 159 - 160 - /* 161 - * It's possible for count to appear to go the wrong way for a 162 - * couple of reasons: 163 - * 164 - * 1. The timer counter underflows, but we haven't handled the 165 - * resulting interrupt and incremented jiffies yet. 166 - * 2. Hardware problem with the timer, not giving us continuous time, 167 - * the counter does small "jumps" upwards on some Pentium systems, 168 - * (see c't 95/10 page 335 for Neptun bug.) 169 - * 170 - * Previous attempts to handle these cases intelligently were 171 - * buggy, so we just do the simple thing now. 172 - */ 173 - if (count > old_count && jifs == old_jifs) 174 - count = old_count; 175 - 176 - old_count = count; 177 - old_jifs = jifs; 178 - 179 - raw_spin_unlock_irqrestore(&i8253_lock, flags); 180 - 181 - count = (LATCH - 1) - count; 182 - 183 - return (cycle_t)(jifs * LATCH) + count; 184 - } 185 - 186 - static struct clocksource pit_cs = { 187 - .name = "pit", 188 - .rating = 110, 189 - .read = pit_read, 190 - .mask = CLOCKSOURCE_MASK(32), 191 - .mult = 0, 192 - .shift = 20, 193 - }; 194 - 195 120 static int __init init_pit_clocksource(void) 196 121 { 197 122 /* ··· 130 205 pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) 131 206 return 0; 132 207 133 - pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift); 134 - 135 - return clocksource_register(&pit_cs); 208 + return clocksource_i8253_init(); 136 209 } 137 210 arch_initcall(init_pit_clocksource); 138 - 139 211 #endif /* !CONFIG_X86_64 */
+1 -5
arch/x86/kernel/kvmclock.c
··· 26 26 #include <asm/x86_init.h> 27 27 #include <asm/reboot.h> 28 28 29 - #define KVM_SCALE 22 30 - 31 29 static int kvmclock = 1; 32 30 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; 33 31 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; ··· 118 120 .read = kvm_clock_get_cycles, 119 121 .rating = 400, 120 122 .mask = CLOCKSOURCE_MASK(64), 121 - .mult = 1 << KVM_SCALE, 122 - .shift = KVM_SCALE, 123 123 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 124 124 }; 125 125 ··· 199 203 machine_ops.crash_shutdown = kvm_crash_shutdown; 200 204 #endif 201 205 kvm_get_preset_lpj(); 202 - clocksource_register(&kvm_clock); 206 + clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); 203 207 pv_info.paravirt_enabled = 1; 204 208 pv_info.name = "KVM"; 205 209
+1 -3
arch/x86/lguest/boot.c
··· 913 913 .rating = 200, 914 914 .read = lguest_clock_read, 915 915 .mask = CLOCKSOURCE_MASK(64), 916 - .mult = 1 << 22, 917 - .shift = 22, 918 916 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 919 917 }; 920 918 ··· 995 997 /* Set up the timer interrupt (0) to go to our simple timer routine */ 996 998 irq_set_handler(0, lguest_time_irq); 997 999 998 - clocksource_register(&lguest_clock); 1000 + clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); 999 1001 1000 1002 /* We can't set cpumask in the initializer: damn C limitations! Set it 1001 1003 * here and register our timer device. */
+1 -5
arch/x86/platform/uv/uv_time.c
··· 40 40 .rating = 400, 41 41 .read = uv_read_rtc, 42 42 .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, 43 - .shift = 10, 44 43 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 45 44 }; 46 45 ··· 371 372 if (!is_uv_system()) 372 373 return -ENODEV; 373 374 374 - clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, 375 - clocksource_uv.shift); 376 - 377 375 /* If single blade, prefer tsc */ 378 376 if (uv_num_possible_blades() == 1) 379 377 clocksource_uv.rating = 250; 380 378 381 - rc = clocksource_register(&clocksource_uv); 379 + rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second); 382 380 if (rc) 383 381 printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); 384 382 else
+1 -5
arch/x86/xen/time.c
··· 26 26 27 27 #include "xen-ops.h" 28 28 29 - #define XEN_SHIFT 22 30 - 31 29 /* Xen may fire a timer up to this many ns early */ 32 30 #define TIMER_SLOP 100000 33 31 #define NS_PER_TICK (1000000000LL / HZ) ··· 209 211 .rating = 400, 210 212 .read = xen_clocksource_get_cycles, 211 213 .mask = ~0, 212 - .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */ 213 - .shift = XEN_SHIFT, 214 214 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 215 215 }; 216 216 ··· 444 448 int cpu = smp_processor_id(); 445 449 struct timespec tp; 446 450 447 - clocksource_register(&xen_clocksource); 451 + clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); 448 452 449 453 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { 450 454 /* Successfully turned off 100Hz tick, so we have the
+3
drivers/Kconfig
··· 119 119 source "drivers/clk/Kconfig" 120 120 121 121 source "drivers/hwspinlock/Kconfig" 122 + 123 + source "drivers/clocksource/Kconfig" 124 + 122 125 endmenu
+1 -5
drivers/char/hpet.c
··· 84 84 .rating = 250, 85 85 .read = read_hpet, 86 86 .mask = CLOCKSOURCE_MASK(64), 87 - .mult = 0, /* to be calculated */ 88 - .shift = 10, 89 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 90 88 }; 91 89 static struct clocksource *hpet_clocksource; ··· 932 934 if (!hpet_clocksource) { 933 935 hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; 934 936 CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); 935 - clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, 936 - clocksource_hpet.shift); 937 - clocksource_register(&clocksource_hpet); 937 + clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); 938 938 hpetp->hp_clocksource = &clocksource_hpet; 939 939 hpet_clocksource = &clocksource_hpet; 940 940 }
+2
drivers/clocksource/Kconfig
··· 1 + config CLKSRC_I8253 2 + bool
+1
drivers/clocksource/Makefile
··· 6 6 obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 7 7 obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 8 8 obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 9 + obj-$(CONFIG_CLKSRC_I8253) += i8253.o
+2 -8
drivers/clocksource/cyclone.c
··· 29 29 .rating = 250, 30 30 .read = read_cyclone, 31 31 .mask = CYCLONE_TIMER_MASK, 32 - .mult = 10, 33 - .shift = 0, 34 32 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 35 33 }; 36 34 ··· 106 108 } 107 109 cyclone_ptr = cyclone_timer; 108 110 109 - /* sort out mult/shift values: */ 110 - clocksource_cyclone.shift = 22; 111 - clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, 112 - clocksource_cyclone.shift); 113 - 114 - return clocksource_register(&clocksource_cyclone); 111 + return clocksource_register_hz(&clocksource_cyclone, 112 + CYCLONE_TIMER_FREQ); 115 113 } 116 114 117 115 arch_initcall(init_cyclone_clocksource);
+88
drivers/clocksource/i8253.c
··· 1 + /* 2 + * i8253 PIT clocksource 3 + */ 4 + #include <linux/clocksource.h> 5 + #include <linux/init.h> 6 + #include <linux/io.h> 7 + #include <linux/spinlock.h> 8 + #include <linux/timex.h> 9 + 10 + #include <asm/i8253.h> 11 + 12 + /* 13 + * Since the PIT overflows every tick, its not very useful 14 + * to just read by itself. So use jiffies to emulate a free 15 + * running counter: 16 + */ 17 + static cycle_t i8253_read(struct clocksource *cs) 18 + { 19 + static int old_count; 20 + static u32 old_jifs; 21 + unsigned long flags; 22 + int count; 23 + u32 jifs; 24 + 25 + raw_spin_lock_irqsave(&i8253_lock, flags); 26 + /* 27 + * Although our caller may have the read side of xtime_lock, 28 + * this is now a seqlock, and we are cheating in this routine 29 + * by having side effects on state that we cannot undo if 30 + * there is a collision on the seqlock and our caller has to 31 + * retry. (Namely, old_jifs and old_count.) So we must treat 32 + * jiffies as volatile despite the lock. We read jiffies 33 + * before latching the timer count to guarantee that although 34 + * the jiffies value might be older than the count (that is, 35 + * the counter may underflow between the last point where 36 + * jiffies was incremented and the point where we latch the 37 + * count), it cannot be newer. 38 + */ 39 + jifs = jiffies; 40 + outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ 41 + count = inb_pit(PIT_CH0); /* read the latched count */ 42 + count |= inb_pit(PIT_CH0) << 8; 43 + 44 + /* VIA686a test code... reset the latch if count > max + 1 */ 45 + if (count > LATCH) { 46 + outb_pit(0x34, PIT_MODE); 47 + outb_pit(PIT_LATCH & 0xff, PIT_CH0); 48 + outb_pit(PIT_LATCH >> 8, PIT_CH0); 49 + count = PIT_LATCH - 1; 50 + } 51 + 52 + /* 53 + * It's possible for count to appear to go the wrong way for a 54 + * couple of reasons: 55 + * 56 + * 1. The timer counter underflows, but we haven't handled the 57 + * resulting interrupt and incremented jiffies yet. 58 + * 2. Hardware problem with the timer, not giving us continuous time, 59 + * the counter does small "jumps" upwards on some Pentium systems, 60 + * (see c't 95/10 page 335 for Neptun bug.) 61 + * 62 + * Previous attempts to handle these cases intelligently were 63 + * buggy, so we just do the simple thing now. 64 + */ 65 + if (count > old_count && jifs == old_jifs) 66 + count = old_count; 67 + 68 + old_count = count; 69 + old_jifs = jifs; 70 + 71 + raw_spin_unlock_irqrestore(&i8253_lock, flags); 72 + 73 + count = (PIT_LATCH - 1) - count; 74 + 75 + return (cycle_t)(jifs * PIT_LATCH) + count; 76 + } 77 + 78 + static struct clocksource i8253_cs = { 79 + .name = "pit", 80 + .rating = 110, 81 + .read = i8253_read, 82 + .mask = CLOCKSOURCE_MASK(32), 83 + }; 84 + 85 + int __init clocksource_i8253_init(void) 86 + { 87 + return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE); 88 + }
+3 -1
include/linux/clocksource.h
··· 161 161 /* 162 162 * First part of structure is read mostly 163 163 */ 164 - char *name; 164 + const char *name; 165 165 struct list_head list; 166 166 int rating; 167 167 cycle_t (*read)(struct clocksource *cs); ··· 340 340 #endif 341 341 342 342 extern void timekeeping_notify(struct clocksource *clock); 343 + 344 + extern int clocksource_i8253_init(void); 343 345 344 346 #endif /* _LINUX_CLOCKSOURCE_H */