Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
timers: handle HRTIMER_CB_IRQSAFE_UNLOCKED correctly from softirq context
nohz: disable tick_nohz_kick_tick() for now
irq: call __irq_enter() before calling the tick_idle_check
x86: HPET: enter hpet_interrupt_handler with interrupts disabled
x86: HPET: read from HPET_Tn_CMP() not HPET_T0_CMP
x86: HPET: convert WARN_ON to WARN_ON_ONCE

+26 -6
+2 -2
arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@
 	 * what we wrote hit the chip before we compare it to the
 	 * counter.
 	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@
 {
 
 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
 		return -1;
 
 	disable_irq(dev->irq);
+16 -1
kernel/hrtimer.c
@@ -1209,6 +1209,7 @@
 		enum hrtimer_restart (*fn)(struct hrtimer *);
 		struct hrtimer *timer;
 		int restart;
+		int emulate_hardirq_ctx = 0;
 
 		timer = list_entry(cpu_base->cb_pending.next,
 				   struct hrtimer, cb_entry);
@@ -1217,10 +1218,23 @@
 		timer_stats_account_hrtimer(timer);
 
 		fn = timer->function;
+		/*
+		 * A timer might have been added to the cb_pending list
+		 * when it was migrated during a cpu-offline operation.
+		 * Emulate hardirq context for such timers.
+		 */
+		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
+			emulate_hardirq_ctx = 1;
+
 		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
 		spin_unlock_irq(&cpu_base->lock);
 
-		restart = fn(timer);
+		if (unlikely(emulate_hardirq_ctx)) {
+			local_irq_disable();
+			restart = fn(timer);
+			local_irq_enable();
+		} else
+			restart = fn(timer);
 
 		spin_lock_irq(&cpu_base->lock);
+4 -3
kernel/softirq.c
@@ -269,10 +269,11 @@
 {
 	int cpu = smp_processor_id();
 
-	if (idle_cpu(cpu) && !in_interrupt())
+	if (idle_cpu(cpu) && !in_interrupt()) {
+		__irq_enter();
 		tick_check_idle(cpu);
-
-	__irq_enter();
+	} else
+		__irq_enter();
 }
+4
kernel/time/tick-sched.c
@@ -568,6 +568,9 @@
  */
 static void tick_nohz_kick_tick(int cpu)
 {
+#if 0
+	/* Switch back to 2.6.27 behaviour */
+
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t delta, now;
 
@@ -584,6 +587,7 @@
 		return;
 
 	tick_nohz_restart(ts, now);
+#endif
 }