Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
timers: handle HRTIMER_CB_IRQSAFE_UNLOCKED correctly from softirq context
nohz: disable tick_nohz_kick_tick() for now
irq: call __irq_enter() before calling the tick_idle_check
x86: HPET: enter hpet_interrupt_handler with interrupts disabled
x86: HPET: read from HPET_Tn_CMP() not HPET_T0_CMP
x86: HPET: convert WARN_ON to WARN_ON_ONCE

4 files changed, 26 insertions(+), 6 deletions(-)
arch/x86/kernel/hpet.c (+2 -2)
···
         * what we wrote hit the chip before we compare it to the
         * counter.
         */
-       WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+       WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);

        return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
···
{

        if (request_irq(dev->irq, hpet_interrupt_handler,
-                       IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+                       IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
                return -1;

        disable_irq(dev->irq);
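A brief reference aside, not part of the diff above: the comparator offsets below are quoted as they appear in the x86 asm/hpet.h of this era, to show why the per-timer macro matters for the read-back check.

/* Quoted for reference from asm/hpet.h (assumed layout of this era); each HPET
 * timer owns a 0x20-byte register block, so comparator n sits at 0x108 + 0x20*n. */
#define HPET_T0_CMP     0x108                   /* comparator of timer 0 only */
#define HPET_Tn_CMP(n)  (0x108 + 0x20 * n)      /* comparator of timer n */

Since the comparator is programmed per timer, reading back the hardcoded HPET_T0_CMP compares against the wrong register for every channel other than 0; hence the switch to HPET_Tn_CMP(timer) above.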
kernel/hrtimer.c (+16 -1)
···
                enum hrtimer_restart (*fn)(struct hrtimer *);
                struct hrtimer *timer;
                int restart;
+               int emulate_hardirq_ctx = 0;

                timer = list_entry(cpu_base->cb_pending.next,
                                   struct hrtimer, cb_entry);
···
                timer_stats_account_hrtimer(timer);

                fn = timer->function;
+               /*
+                * A timer might have been added to the cb_pending list
+                * when it was migrated during a cpu-offline operation.
+                * Emulate hardirq context for such timers.
+                */
+               if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+                   timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
+                       emulate_hardirq_ctx = 1;
+
                __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
                spin_unlock_irq(&cpu_base->lock);

-               restart = fn(timer);
+               if (unlikely(emulate_hardirq_ctx)) {
+                       local_irq_disable();
+                       restart = fn(timer);
+                       local_irq_enable();
+               } else
+                       restart = fn(timer);

                spin_lock_irq(&cpu_base->lock);
kernel/softirq.c (+4 -3)
···
{
        int cpu = smp_processor_id();

-       if (idle_cpu(cpu) && !in_interrupt())
+       if (idle_cpu(cpu) && !in_interrupt()) {
+               __irq_enter();
                tick_check_idle(cpu);
-
-       __irq_enter();
+       } else
+               __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
kernel/time/tick-sched.c (+4)
···
 */
static void tick_nohz_kick_tick(int cpu)
{
+#if 0
+       /* Switch back to 2.6.27 behaviour */
+
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t delta, now;

···
                return;

        tick_nohz_restart(ts, now);
+#endif
}

#else