Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, nmi_watchdog: Remove all stub function calls from old nmi_watchdog

Now that the bulk of the old nmi_watchdog is gone, remove all
the stub variables and hooks associated with it.

This touches lots of files mainly because of how the io_apic
nmi_watchdog was implemented. Now that the io_apic nmi_watchdog
is forever gone, remove all its fingers.

Most of this code was not being exercised by virtue of
nmi_watchdog != NMI_IO_APIC, so there shouldn't be anything
too risky here.

Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: fweisbec@gmail.com
Cc: gorcunov@openvz.org
LKML-Reference: <1289578944-28564-3-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by

Don Zickus and committed by
Ingo Molnar
072b198a 5f2b0ba4

+2 -819
-47
arch/x86/include/asm/nmi.h
··· 7 7 8 8 #ifdef ARCH_HAS_NMI_WATCHDOG 9 9 10 - /** 11 - * do_nmi_callback 12 - * 13 - * Check to see if a callback exists and execute it. Return 1 14 - * if the handler exists and was handled successfully. 15 - */ 16 - int do_nmi_callback(struct pt_regs *regs, int cpu); 17 - 18 10 extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); 19 - extern int check_nmi_watchdog(void); 20 11 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); 21 12 extern int reserve_perfctr_nmi(unsigned int); 22 13 extern void release_perfctr_nmi(unsigned int); 23 14 extern int reserve_evntsel_nmi(unsigned int); 24 15 extern void release_evntsel_nmi(unsigned int); 25 - 26 - extern void setup_apic_nmi_watchdog(void *); 27 - extern void stop_apic_nmi_watchdog(void *); 28 - extern void disable_timer_nmi_watchdog(void); 29 - extern void enable_timer_nmi_watchdog(void); 30 - extern void cpu_nmi_set_wd_enabled(void); 31 - 32 - extern atomic_t nmi_active; 33 - extern unsigned int nmi_watchdog; 34 - #define NMI_NONE 0 35 - #define NMI_IO_APIC 1 36 - #define NMI_LOCAL_APIC 2 37 - #define NMI_INVALID 3 38 16 39 17 struct ctl_table; 40 18 extern int proc_nmi_enabled(struct ctl_table *, int , ··· 21 43 22 44 void arch_trigger_all_cpu_backtrace(void); 23 45 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 24 - 25 - static inline void localise_nmi_watchdog(void) 26 - { 27 - if (nmi_watchdog == NMI_IO_APIC) 28 - nmi_watchdog = NMI_LOCAL_APIC; 29 - } 30 - 31 - /* check if nmi_watchdog is active (ie was specified at boot) */ 32 - static inline int nmi_watchdog_active(void) 33 - { 34 - /* 35 - * actually it should be: 36 - * return (nmi_watchdog == NMI_LOCAL_APIC || 37 - * nmi_watchdog == NMI_IO_APIC) 38 - * but since they are power of two we could use a 39 - * cheaper way --cvg 40 - */ 41 - return nmi_watchdog & (NMI_LOCAL_APIC | NMI_IO_APIC); 42 - } 43 46 #endif 44 47 45 - void lapic_watchdog_stop(void); 46 - int lapic_watchdog_init(unsigned nmi_hz); 47 - int 
lapic_wd_event(unsigned nmi_hz); 48 - unsigned lapic_adjust_nmi_hz(unsigned hz); 49 - void disable_lapic_nmi_watchdog(void); 50 - void enable_lapic_nmi_watchdog(void); 51 48 void stop_nmi(void); 52 49 void restart_nmi(void); 53 50
-1
arch/x86/include/asm/smpboot_hooks.h
··· 48 48 setup_IO_APIC(); 49 49 else { 50 50 nr_ioapics = 0; 51 - localise_nmi_watchdog(); 52 51 } 53 52 #endif 54 53 }
-6
arch/x86/include/asm/timer.h
··· 10 10 unsigned long long native_sched_clock(void); 11 11 extern int recalibrate_cpu_khz(void); 12 12 13 - #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC) 14 - extern int timer_ack; 15 - #else 16 - # define timer_ack (0) 17 - #endif 18 - 19 13 extern int no_timer_check; 20 14 21 15 /* Accelerators for sched_clock()
+1 -14
arch/x86/kernel/apic/apic.c
··· 31 31 #include <linux/init.h> 32 32 #include <linux/cpu.h> 33 33 #include <linux/dmi.h> 34 - #include <linux/nmi.h> 35 34 #include <linux/smp.h> 36 35 #include <linux/mm.h> 37 36 ··· 798 799 * PIT/HPET going. Otherwise register lapic as a dummy 799 800 * device. 800 801 */ 801 - if (nmi_watchdog != NMI_IO_APIC) 802 - lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; 803 - else 804 - pr_warning("APIC timer registered as dummy," 805 - " due to nmi_watchdog=%d!\n", nmi_watchdog); 802 + lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; 806 803 807 804 /* Setup the lapic or request the broadcast */ 808 805 setup_APIC_timer(); ··· 1382 1387 } 1383 1388 #endif 1384 1389 1385 - setup_apic_nmi_watchdog(NULL); 1386 1390 apic_pm_activate(); 1387 1391 } 1388 1392 ··· 1744 1750 setup_IO_APIC(); 1745 1751 else { 1746 1752 nr_ioapics = 0; 1747 - localise_nmi_watchdog(); 1748 1753 } 1749 - #else 1750 - localise_nmi_watchdog(); 1751 1754 #endif 1752 1755 1753 1756 x86_init.timers.setup_percpu_clockev(); 1754 - #ifdef CONFIG_X86_64 1755 - check_nmi_watchdog(); 1756 - #endif 1757 - 1758 1757 return 0; 1759 1758 } 1760 1759
-10
arch/x86/kernel/apic/hw_nmi.c
··· 94 94 #endif 95 95 96 96 /* STUB calls to mimic old nmi_watchdog behaviour */ 97 - #if defined(CONFIG_X86_LOCAL_APIC) 98 - unsigned int nmi_watchdog = NMI_NONE; 99 - EXPORT_SYMBOL(nmi_watchdog); 100 - #endif 101 - atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ 102 - EXPORT_SYMBOL(nmi_active); 103 97 int unknown_nmi_panic; 104 - void cpu_nmi_set_wd_enabled(void) { return; } 105 - void stop_apic_nmi_watchdog(void *unused) { return; } 106 - void setup_apic_nmi_watchdog(void *unused) { return; } 107 - int __init check_nmi_watchdog(void) { return 0; }
-46
arch/x86/kernel/apic/io_apic.c
··· 54 54 #include <asm/dma.h> 55 55 #include <asm/timer.h> 56 56 #include <asm/i8259.h> 57 - #include <asm/nmi.h> 58 57 #include <asm/msidef.h> 59 58 #include <asm/hypertransport.h> 60 59 #include <asm/setup.h> ··· 2642 2643 "edge"); 2643 2644 } 2644 2645 2645 - static void __init setup_nmi(void) 2646 - { 2647 - /* 2648 - * Dirty trick to enable the NMI watchdog ... 2649 - * We put the 8259A master into AEOI mode and 2650 - * unmask on all local APICs LVT0 as NMI. 2651 - * 2652 - * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') 2653 - * is from Maciej W. Rozycki - so we do not have to EOI from 2654 - * the NMI handler or the timer interrupt. 2655 - */ 2656 - apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); 2657 - 2658 - enable_NMI_through_LVT0(); 2659 - 2660 - apic_printk(APIC_VERBOSE, " done.\n"); 2661 - } 2662 - 2663 2646 /* 2664 2647 * This looks a bit hackish but it's about the only one way of sending 2665 2648 * a few INTA cycles to 8259As and any associated glue logic. ICR does ··· 2747 2766 */ 2748 2767 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2749 2768 legacy_pic->init(1); 2750 - #ifdef CONFIG_X86_32 2751 - { 2752 - unsigned int ver; 2753 - 2754 - ver = apic_read(APIC_LVR); 2755 - ver = GET_APIC_VERSION(ver); 2756 - timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); 2757 - } 2758 - #endif 2759 2769 2760 2770 pin1 = find_isa_irq_pin(0, mp_INT); 2761 2771 apic1 = find_isa_irq_apic(0, mp_INT); ··· 2794 2822 unmask_ioapic(cfg); 2795 2823 } 2796 2824 if (timer_irq_works()) { 2797 - if (nmi_watchdog == NMI_IO_APIC) { 2798 - setup_nmi(); 2799 - legacy_pic->unmask(0); 2800 - } 2801 2825 if (disable_timer_pin_1 > 0) 2802 2826 clear_IO_APIC_pin(0, pin1); 2803 2827 goto out; ··· 2819 2851 if (timer_irq_works()) { 2820 2852 apic_printk(APIC_QUIET, KERN_INFO "....... 
works.\n"); 2821 2853 timer_through_8259 = 1; 2822 - if (nmi_watchdog == NMI_IO_APIC) { 2823 - legacy_pic->mask(0); 2824 - setup_nmi(); 2825 - legacy_pic->unmask(0); 2826 - } 2827 2854 goto out; 2828 2855 } 2829 2856 /* ··· 2829 2866 clear_IO_APIC_pin(apic2, pin2); 2830 2867 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2831 2868 } 2832 - 2833 - if (nmi_watchdog == NMI_IO_APIC) { 2834 - apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " 2835 - "through the IO-APIC - disabling NMI Watchdog!\n"); 2836 - nmi_watchdog = NMI_NONE; 2837 - } 2838 - #ifdef CONFIG_X86_32 2839 - timer_ack = 0; 2840 - #endif 2841 2869 2842 2870 apic_printk(APIC_QUIET, KERN_INFO 2843 2871 "...trying to set up timer as Virtual Wire IRQ...\n");
-9
arch/x86/kernel/cpu/perf_event.c
··· 330 330 { 331 331 int i; 332 332 333 - if (nmi_watchdog == NMI_LOCAL_APIC) 334 - disable_lapic_nmi_watchdog(); 335 - 336 333 for (i = 0; i < x86_pmu.num_counters; i++) { 337 334 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) 338 335 goto perfctr_fail; ··· 352 355 for (i--; i >= 0; i--) 353 356 release_perfctr_nmi(x86_pmu.perfctr + i); 354 357 355 - if (nmi_watchdog == NMI_LOCAL_APIC) 356 - enable_lapic_nmi_watchdog(); 357 - 358 358 return false; 359 359 } 360 360 ··· 363 369 release_perfctr_nmi(x86_pmu.perfctr + i); 364 370 release_evntsel_nmi(x86_pmu.eventsel + i); 365 371 } 366 - 367 - if (nmi_watchdog == NMI_LOCAL_APIC) 368 - enable_lapic_nmi_watchdog(); 369 372 } 370 373 371 374 #else
-642
arch/x86/kernel/cpu/perfctr-watchdog.c
··· 22 22 #include <asm/apic.h> 23 23 #include <asm/perf_event.h> 24 24 25 - struct nmi_watchdog_ctlblk { 26 - unsigned int cccr_msr; 27 - unsigned int perfctr_msr; /* the MSR to reset in NMI handler */ 28 - unsigned int evntsel_msr; /* the MSR to select the events to handle */ 29 - }; 30 - 31 - /* Interface defining a CPU specific perfctr watchdog */ 32 - struct wd_ops { 33 - int (*reserve)(void); 34 - void (*unreserve)(void); 35 - int (*setup)(unsigned nmi_hz); 36 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); 37 - void (*stop)(void); 38 - unsigned perfctr; 39 - unsigned evntsel; 40 - u64 checkbit; 41 - }; 42 - 43 - static const struct wd_ops *wd_ops; 44 - 45 25 /* 46 26 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's 47 27 * offset from MSR_P4_BSU_ESCR0. ··· 39 59 */ 40 60 static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS); 41 61 static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS); 42 - 43 - static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); 44 62 45 63 /* converts an msr to an appropriate reservation bit */ 46 64 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) ··· 150 172 clear_bit(counter, evntsel_nmi_owner); 151 173 } 152 174 EXPORT_SYMBOL(release_evntsel_nmi); 153 - 154 - void disable_lapic_nmi_watchdog(void) 155 - { 156 - BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); 157 - 158 - if (atomic_read(&nmi_active) <= 0) 159 - return; 160 - 161 - on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); 162 - 163 - if (wd_ops) 164 - wd_ops->unreserve(); 165 - 166 - BUG_ON(atomic_read(&nmi_active) != 0); 167 - } 168 - 169 - void enable_lapic_nmi_watchdog(void) 170 - { 171 - BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); 172 - 173 - /* are we already enabled */ 174 - if (atomic_read(&nmi_active) != 0) 175 - return; 176 - 177 - /* are we lapic aware */ 178 - if (!wd_ops) 179 - return; 180 - if (!wd_ops->reserve()) { 181 - printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n"); 
182 - return; 183 - } 184 - 185 - on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); 186 - touch_nmi_watchdog(); 187 - } 188 - 189 - /* 190 - * Activate the NMI watchdog via the local APIC. 191 - */ 192 - 193 - static unsigned int adjust_for_32bit_ctr(unsigned int hz) 194 - { 195 - u64 counter_val; 196 - unsigned int retval = hz; 197 - 198 - /* 199 - * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter 200 - * are writable, with higher bits sign extending from bit 31. 201 - * So, we can only program the counter with 31 bit values and 202 - * 32nd bit should be 1, for 33.. to be 1. 203 - * Find the appropriate nmi_hz 204 - */ 205 - counter_val = (u64)cpu_khz * 1000; 206 - do_div(counter_val, retval); 207 - if (counter_val > 0x7fffffffULL) { 208 - u64 count = (u64)cpu_khz * 1000; 209 - do_div(count, 0x7fffffffUL); 210 - retval = count + 1; 211 - } 212 - return retval; 213 - } 214 - 215 - static void write_watchdog_counter(unsigned int perfctr_msr, 216 - const char *descr, unsigned nmi_hz) 217 - { 218 - u64 count = (u64)cpu_khz * 1000; 219 - 220 - do_div(count, nmi_hz); 221 - if (descr) 222 - pr_debug("setting %s to -0x%08Lx\n", descr, count); 223 - wrmsrl(perfctr_msr, 0 - count); 224 - } 225 - 226 - static void write_watchdog_counter32(unsigned int perfctr_msr, 227 - const char *descr, unsigned nmi_hz) 228 - { 229 - u64 count = (u64)cpu_khz * 1000; 230 - 231 - do_div(count, nmi_hz); 232 - if (descr) 233 - pr_debug("setting %s to -0x%08Lx\n", descr, count); 234 - wrmsr(perfctr_msr, (u32)(-count), 0); 235 - } 236 - 237 - /* 238 - * AMD K7/K8/Family10h/Family11h support. 
239 - * AMD keeps this interface nicely stable so there is not much variety 240 - */ 241 - #define K7_EVNTSEL_ENABLE (1 << 22) 242 - #define K7_EVNTSEL_INT (1 << 20) 243 - #define K7_EVNTSEL_OS (1 << 17) 244 - #define K7_EVNTSEL_USR (1 << 16) 245 - #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 246 - #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 247 - 248 - static int setup_k7_watchdog(unsigned nmi_hz) 249 - { 250 - unsigned int perfctr_msr, evntsel_msr; 251 - unsigned int evntsel; 252 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 253 - 254 - perfctr_msr = wd_ops->perfctr; 255 - evntsel_msr = wd_ops->evntsel; 256 - 257 - wrmsrl(perfctr_msr, 0UL); 258 - 259 - evntsel = K7_EVNTSEL_INT 260 - | K7_EVNTSEL_OS 261 - | K7_EVNTSEL_USR 262 - | K7_NMI_EVENT; 263 - 264 - /* setup the timer */ 265 - wrmsr(evntsel_msr, evntsel, 0); 266 - write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); 267 - 268 - /* initialize the wd struct before enabling */ 269 - wd->perfctr_msr = perfctr_msr; 270 - wd->evntsel_msr = evntsel_msr; 271 - wd->cccr_msr = 0; /* unused */ 272 - 273 - /* ok, everything is initialized, announce that we're set */ 274 - cpu_nmi_set_wd_enabled(); 275 - 276 - apic_write(APIC_LVTPC, APIC_DM_NMI); 277 - evntsel |= K7_EVNTSEL_ENABLE; 278 - wrmsr(evntsel_msr, evntsel, 0); 279 - 280 - return 1; 281 - } 282 - 283 - static void single_msr_stop_watchdog(void) 284 - { 285 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 286 - 287 - wrmsr(wd->evntsel_msr, 0, 0); 288 - } 289 - 290 - static int single_msr_reserve(void) 291 - { 292 - if (!reserve_perfctr_nmi(wd_ops->perfctr)) 293 - return 0; 294 - 295 - if (!reserve_evntsel_nmi(wd_ops->evntsel)) { 296 - release_perfctr_nmi(wd_ops->perfctr); 297 - return 0; 298 - } 299 - return 1; 300 - } 301 - 302 - static void single_msr_unreserve(void) 303 - { 304 - release_evntsel_nmi(wd_ops->evntsel); 305 - release_perfctr_nmi(wd_ops->perfctr); 306 - } 307 - 308 - static 
void __kprobes 309 - single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 310 - { 311 - /* start the cycle over again */ 312 - write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 313 - } 314 - 315 - static const struct wd_ops k7_wd_ops = { 316 - .reserve = single_msr_reserve, 317 - .unreserve = single_msr_unreserve, 318 - .setup = setup_k7_watchdog, 319 - .rearm = single_msr_rearm, 320 - .stop = single_msr_stop_watchdog, 321 - .perfctr = MSR_K7_PERFCTR0, 322 - .evntsel = MSR_K7_EVNTSEL0, 323 - .checkbit = 1ULL << 47, 324 - }; 325 - 326 - /* 327 - * Intel Model 6 (PPro+,P2,P3,P-M,Core1) 328 - */ 329 - #define P6_EVNTSEL0_ENABLE (1 << 22) 330 - #define P6_EVNTSEL_INT (1 << 20) 331 - #define P6_EVNTSEL_OS (1 << 17) 332 - #define P6_EVNTSEL_USR (1 << 16) 333 - #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 334 - #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED 335 - 336 - static int setup_p6_watchdog(unsigned nmi_hz) 337 - { 338 - unsigned int perfctr_msr, evntsel_msr; 339 - unsigned int evntsel; 340 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 341 - 342 - perfctr_msr = wd_ops->perfctr; 343 - evntsel_msr = wd_ops->evntsel; 344 - 345 - /* KVM doesn't implement this MSR */ 346 - if (wrmsr_safe(perfctr_msr, 0, 0) < 0) 347 - return 0; 348 - 349 - evntsel = P6_EVNTSEL_INT 350 - | P6_EVNTSEL_OS 351 - | P6_EVNTSEL_USR 352 - | P6_NMI_EVENT; 353 - 354 - /* setup the timer */ 355 - wrmsr(evntsel_msr, evntsel, 0); 356 - nmi_hz = adjust_for_32bit_ctr(nmi_hz); 357 - write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); 358 - 359 - /* initialize the wd struct before enabling */ 360 - wd->perfctr_msr = perfctr_msr; 361 - wd->evntsel_msr = evntsel_msr; 362 - wd->cccr_msr = 0; /* unused */ 363 - 364 - /* ok, everything is initialized, announce that we're set */ 365 - cpu_nmi_set_wd_enabled(); 366 - 367 - apic_write(APIC_LVTPC, APIC_DM_NMI); 368 - evntsel |= P6_EVNTSEL0_ENABLE; 369 - wrmsr(evntsel_msr, evntsel, 0); 370 - 371 - return 1; 
372 - } 373 - 374 - static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 375 - { 376 - /* 377 - * P6 based Pentium M need to re-unmask 378 - * the apic vector but it doesn't hurt 379 - * other P6 variant. 380 - * ArchPerfom/Core Duo also needs this 381 - */ 382 - apic_write(APIC_LVTPC, APIC_DM_NMI); 383 - 384 - /* P6/ARCH_PERFMON has 32 bit counter write */ 385 - write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); 386 - } 387 - 388 - static const struct wd_ops p6_wd_ops = { 389 - .reserve = single_msr_reserve, 390 - .unreserve = single_msr_unreserve, 391 - .setup = setup_p6_watchdog, 392 - .rearm = p6_rearm, 393 - .stop = single_msr_stop_watchdog, 394 - .perfctr = MSR_P6_PERFCTR0, 395 - .evntsel = MSR_P6_EVNTSEL0, 396 - .checkbit = 1ULL << 39, 397 - }; 398 - 399 - /* 400 - * Intel P4 performance counters. 401 - * By far the most complicated of all. 402 - */ 403 - #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1 << 7) 404 - #define P4_ESCR_EVENT_SELECT(N) ((N) << 25) 405 - #define P4_ESCR_OS (1 << 3) 406 - #define P4_ESCR_USR (1 << 2) 407 - #define P4_CCCR_OVF_PMI0 (1 << 26) 408 - #define P4_CCCR_OVF_PMI1 (1 << 27) 409 - #define P4_CCCR_THRESHOLD(N) ((N) << 20) 410 - #define P4_CCCR_COMPLEMENT (1 << 19) 411 - #define P4_CCCR_COMPARE (1 << 18) 412 - #define P4_CCCR_REQUIRED (3 << 16) 413 - #define P4_CCCR_ESCR_SELECT(N) ((N) << 13) 414 - #define P4_CCCR_ENABLE (1 << 12) 415 - #define P4_CCCR_OVF (1 << 31) 416 - 417 - #define P4_CONTROLS 18 418 - static unsigned int p4_controls[18] = { 419 - MSR_P4_BPU_CCCR0, 420 - MSR_P4_BPU_CCCR1, 421 - MSR_P4_BPU_CCCR2, 422 - MSR_P4_BPU_CCCR3, 423 - MSR_P4_MS_CCCR0, 424 - MSR_P4_MS_CCCR1, 425 - MSR_P4_MS_CCCR2, 426 - MSR_P4_MS_CCCR3, 427 - MSR_P4_FLAME_CCCR0, 428 - MSR_P4_FLAME_CCCR1, 429 - MSR_P4_FLAME_CCCR2, 430 - MSR_P4_FLAME_CCCR3, 431 - MSR_P4_IQ_CCCR0, 432 - MSR_P4_IQ_CCCR1, 433 - MSR_P4_IQ_CCCR2, 434 - MSR_P4_IQ_CCCR3, 435 - MSR_P4_IQ_CCCR4, 436 - MSR_P4_IQ_CCCR5, 437 - }; 438 - /* 439 - * Set up 
IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter 440 - * CRU_ESCR0 (with any non-null event selector) through a complemented 441 - * max threshold. [IA32-Vol3, Section 14.9.9] 442 - */ 443 - static int setup_p4_watchdog(unsigned nmi_hz) 444 - { 445 - unsigned int perfctr_msr, evntsel_msr, cccr_msr; 446 - unsigned int evntsel, cccr_val; 447 - unsigned int misc_enable, dummy; 448 - unsigned int ht_num; 449 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 450 - 451 - rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); 452 - if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) 453 - return 0; 454 - 455 - #ifdef CONFIG_SMP 456 - /* detect which hyperthread we are on */ 457 - if (smp_num_siblings == 2) { 458 - unsigned int ebx, apicid; 459 - 460 - ebx = cpuid_ebx(1); 461 - apicid = (ebx >> 24) & 0xff; 462 - ht_num = apicid & 1; 463 - } else 464 - #endif 465 - ht_num = 0; 466 - 467 - /* 468 - * performance counters are shared resources 469 - * assign each hyperthread its own set 470 - * (re-use the ESCR0 register, seems safe 471 - * and keeps the cccr_val the same) 472 - */ 473 - if (!ht_num) { 474 - /* logical cpu 0 */ 475 - perfctr_msr = MSR_P4_IQ_PERFCTR0; 476 - evntsel_msr = MSR_P4_CRU_ESCR0; 477 - cccr_msr = MSR_P4_IQ_CCCR0; 478 - cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); 479 - 480 - /* 481 - * If we're on the kdump kernel or other situation, we may 482 - * still have other performance counter registers set to 483 - * interrupt and they'll keep interrupting forever because 484 - * of the P4_CCCR_OVF quirk. So we need to ACK all the 485 - * pending interrupts and disable all the registers here, 486 - * before reenabling the NMI delivery. Refer to p4_rearm() 487 - * about the P4_CCCR_OVF quirk. 
488 - */ 489 - if (reset_devices) { 490 - unsigned int low, high; 491 - int i; 492 - 493 - for (i = 0; i < P4_CONTROLS; i++) { 494 - rdmsr(p4_controls[i], low, high); 495 - low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF); 496 - wrmsr(p4_controls[i], low, high); 497 - } 498 - } 499 - } else { 500 - /* logical cpu 1 */ 501 - perfctr_msr = MSR_P4_IQ_PERFCTR1; 502 - evntsel_msr = MSR_P4_CRU_ESCR0; 503 - cccr_msr = MSR_P4_IQ_CCCR1; 504 - 505 - /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */ 506 - if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4) 507 - cccr_val = P4_CCCR_OVF_PMI0; 508 - else 509 - cccr_val = P4_CCCR_OVF_PMI1; 510 - cccr_val |= P4_CCCR_ESCR_SELECT(4); 511 - } 512 - 513 - evntsel = P4_ESCR_EVENT_SELECT(0x3F) 514 - | P4_ESCR_OS 515 - | P4_ESCR_USR; 516 - 517 - cccr_val |= P4_CCCR_THRESHOLD(15) 518 - | P4_CCCR_COMPLEMENT 519 - | P4_CCCR_COMPARE 520 - | P4_CCCR_REQUIRED; 521 - 522 - wrmsr(evntsel_msr, evntsel, 0); 523 - wrmsr(cccr_msr, cccr_val, 0); 524 - write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); 525 - 526 - wd->perfctr_msr = perfctr_msr; 527 - wd->evntsel_msr = evntsel_msr; 528 - wd->cccr_msr = cccr_msr; 529 - 530 - /* ok, everything is initialized, announce that we're set */ 531 - cpu_nmi_set_wd_enabled(); 532 - 533 - apic_write(APIC_LVTPC, APIC_DM_NMI); 534 - cccr_val |= P4_CCCR_ENABLE; 535 - wrmsr(cccr_msr, cccr_val, 0); 536 - return 1; 537 - } 538 - 539 - static void stop_p4_watchdog(void) 540 - { 541 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 542 - wrmsr(wd->cccr_msr, 0, 0); 543 - wrmsr(wd->evntsel_msr, 0, 0); 544 - } 545 - 546 - static int p4_reserve(void) 547 - { 548 - if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0)) 549 - return 0; 550 - #ifdef CONFIG_SMP 551 - if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1)) 552 - goto fail1; 553 - #endif 554 - if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0)) 555 - goto fail2; 556 - /* RED-PEN why is ESCR1 not reserved here? 
*/ 557 - return 1; 558 - fail2: 559 - #ifdef CONFIG_SMP 560 - if (smp_num_siblings > 1) 561 - release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); 562 - fail1: 563 - #endif 564 - release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); 565 - return 0; 566 - } 567 - 568 - static void p4_unreserve(void) 569 - { 570 - #ifdef CONFIG_SMP 571 - if (smp_num_siblings > 1) 572 - release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); 573 - #endif 574 - release_evntsel_nmi(MSR_P4_CRU_ESCR0); 575 - release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); 576 - } 577 - 578 - static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) 579 - { 580 - unsigned dummy; 581 - /* 582 - * P4 quirks: 583 - * - An overflown perfctr will assert its interrupt 584 - * until the OVF flag in its CCCR is cleared. 585 - * - LVTPC is masked on interrupt and must be 586 - * unmasked by the LVTPC handler. 587 - */ 588 - rdmsrl(wd->cccr_msr, dummy); 589 - dummy &= ~P4_CCCR_OVF; 590 - wrmsrl(wd->cccr_msr, dummy); 591 - apic_write(APIC_LVTPC, APIC_DM_NMI); 592 - /* start the cycle over again */ 593 - write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 594 - } 595 - 596 - static const struct wd_ops p4_wd_ops = { 597 - .reserve = p4_reserve, 598 - .unreserve = p4_unreserve, 599 - .setup = setup_p4_watchdog, 600 - .rearm = p4_rearm, 601 - .stop = stop_p4_watchdog, 602 - /* RED-PEN this is wrong for the other sibling */ 603 - .perfctr = MSR_P4_BPU_PERFCTR0, 604 - .evntsel = MSR_P4_BSU_ESCR0, 605 - .checkbit = 1ULL << 39, 606 - }; 607 - 608 - /* 609 - * Watchdog using the Intel architected PerfMon. 610 - * Used for Core2 and hopefully all future Intel CPUs. 
611 - */ 612 - #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 613 - #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK 614 - 615 - static struct wd_ops intel_arch_wd_ops; 616 - 617 - static int setup_intel_arch_watchdog(unsigned nmi_hz) 618 - { 619 - unsigned int ebx; 620 - union cpuid10_eax eax; 621 - unsigned int unused; 622 - unsigned int perfctr_msr, evntsel_msr; 623 - unsigned int evntsel; 624 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 625 - 626 - /* 627 - * Check whether the Architectural PerfMon supports 628 - * Unhalted Core Cycles Event or not. 629 - * NOTE: Corresponding bit = 0 in ebx indicates event present. 630 - */ 631 - cpuid(10, &(eax.full), &ebx, &unused, &unused); 632 - if ((eax.split.mask_length < 633 - (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || 634 - (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) 635 - return 0; 636 - 637 - perfctr_msr = wd_ops->perfctr; 638 - evntsel_msr = wd_ops->evntsel; 639 - 640 - wrmsrl(perfctr_msr, 0UL); 641 - 642 - evntsel = ARCH_PERFMON_EVENTSEL_INT 643 - | ARCH_PERFMON_EVENTSEL_OS 644 - | ARCH_PERFMON_EVENTSEL_USR 645 - | ARCH_PERFMON_NMI_EVENT_SEL 646 - | ARCH_PERFMON_NMI_EVENT_UMASK; 647 - 648 - /* setup the timer */ 649 - wrmsr(evntsel_msr, evntsel, 0); 650 - nmi_hz = adjust_for_32bit_ctr(nmi_hz); 651 - write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); 652 - 653 - wd->perfctr_msr = perfctr_msr; 654 - wd->evntsel_msr = evntsel_msr; 655 - wd->cccr_msr = 0; /* unused */ 656 - 657 - /* ok, everything is initialized, announce that we're set */ 658 - cpu_nmi_set_wd_enabled(); 659 - 660 - apic_write(APIC_LVTPC, APIC_DM_NMI); 661 - evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE; 662 - wrmsr(evntsel_msr, evntsel, 0); 663 - intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 664 - return 1; 665 - } 666 - 667 - static struct wd_ops intel_arch_wd_ops __read_mostly = { 668 - .reserve = single_msr_reserve, 669 - 
.unreserve = single_msr_unreserve, 670 - .setup = setup_intel_arch_watchdog, 671 - .rearm = p6_rearm, 672 - .stop = single_msr_stop_watchdog, 673 - .perfctr = MSR_ARCH_PERFMON_PERFCTR1, 674 - .evntsel = MSR_ARCH_PERFMON_EVENTSEL1, 675 - }; 676 - 677 - static void probe_nmi_watchdog(void) 678 - { 679 - switch (boot_cpu_data.x86_vendor) { 680 - case X86_VENDOR_AMD: 681 - if (boot_cpu_data.x86 == 6 || 682 - (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15)) 683 - wd_ops = &k7_wd_ops; 684 - return; 685 - case X86_VENDOR_INTEL: 686 - /* Work around where perfctr1 doesn't have a working enable 687 - * bit as described in the following errata: 688 - * AE49 Core Duo and Intel Core Solo 65 nm 689 - * AN49 Intel Pentium Dual-Core 690 - * AF49 Dual-Core Intel Xeon Processor LV 691 - */ 692 - if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) || 693 - ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 && 694 - boot_cpu_data.x86_mask == 4))) { 695 - intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0; 696 - intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0; 697 - } 698 - if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 699 - wd_ops = &intel_arch_wd_ops; 700 - break; 701 - } 702 - switch (boot_cpu_data.x86) { 703 - case 6: 704 - if (boot_cpu_data.x86_model > 13) 705 - return; 706 - 707 - wd_ops = &p6_wd_ops; 708 - break; 709 - case 15: 710 - wd_ops = &p4_wd_ops; 711 - break; 712 - default: 713 - return; 714 - } 715 - break; 716 - } 717 - } 718 - 719 - /* Interface to nmi.c */ 720 - 721 - int lapic_watchdog_init(unsigned nmi_hz) 722 - { 723 - if (!wd_ops) { 724 - probe_nmi_watchdog(); 725 - if (!wd_ops) { 726 - printk(KERN_INFO "NMI watchdog: CPU not supported\n"); 727 - return -1; 728 - } 729 - 730 - if (!wd_ops->reserve()) { 731 - printk(KERN_ERR 732 - "NMI watchdog: cannot reserve perfctrs\n"); 733 - return -1; 734 - } 735 - } 736 - 737 - if (!(wd_ops->setup(nmi_hz))) { 738 - printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n", 739 - 
raw_smp_processor_id()); 740 - return -1; 741 - } 742 - 743 - return 0; 744 - } 745 - 746 - void lapic_watchdog_stop(void) 747 - { 748 - if (wd_ops) 749 - wd_ops->stop(); 750 - } 751 - 752 - unsigned lapic_adjust_nmi_hz(unsigned hz) 753 - { 754 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 755 - if (wd->perfctr_msr == MSR_P6_PERFCTR0 || 756 - wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) 757 - hz = adjust_for_32bit_ctr(hz); 758 - return hz; 759 - } 760 - 761 - int __kprobes lapic_wd_event(unsigned nmi_hz) 762 - { 763 - struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); 764 - u64 ctr; 765 - 766 - rdmsrl(wd->perfctr_msr, ctr); 767 - if (ctr & wd_ops->checkbit) /* perfctr still running? */ 768 - return 0; 769 - 770 - wd_ops->rearm(wd, nmi_hz); 771 - return 1; 772 - }
-11
arch/x86/kernel/smpboot.c
··· 316 316 */ 317 317 check_tsc_sync_target(); 318 318 319 - if (nmi_watchdog == NMI_IO_APIC) { 320 - legacy_pic->mask(0); 321 - enable_NMI_through_LVT0(); 322 - legacy_pic->unmask(0); 323 - } 324 - 325 319 /* This must be done before setting cpu_online_mask */ 326 320 set_cpu_sibling_map(raw_smp_processor_id()); 327 321 wmb(); ··· 1055 1061 printk(KERN_INFO "SMP mode deactivated.\n"); 1056 1062 smpboot_clear_io_apic(); 1057 1063 1058 - localise_nmi_watchdog(); 1059 - 1060 1064 connect_bsp_APIC(); 1061 1065 setup_local_APIC(); 1062 1066 end_local_APIC_setup(); ··· 1188 1196 #ifdef CONFIG_X86_IO_APIC 1189 1197 setup_ioapic_dest(); 1190 1198 #endif 1191 - check_nmi_watchdog(); 1192 1199 mtrr_aps_init(); 1193 1200 } 1194 1201 ··· 1332 1341 if (cpu == 0) 1333 1342 return -EBUSY; 1334 1343 1335 - if (nmi_watchdog == NMI_LOCAL_APIC) 1336 - stop_apic_nmi_watchdog(NULL); 1337 1344 clear_local_APIC(); 1338 1345 1339 1346 cpu_disable_common();
-18
arch/x86/kernel/time.c
··· 22 22 #include <asm/hpet.h> 23 23 #include <asm/time.h> 24 24 25 - #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC) 26 - int timer_ack; 27 - #endif 28 - 29 25 #ifdef CONFIG_X86_64 30 26 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; 31 27 #endif ··· 58 62 { 59 63 /* Keep nmi watchdog up to date */ 60 64 inc_irq_stat(irq0_irqs); 61 - 62 - /* Optimized out for !IO_APIC and x86_64 */ 63 - if (timer_ack) { 64 - /* 65 - * Subtle, when I/O APICs are used we have to ack timer IRQ 66 - * manually to deassert NMI lines for the watchdog if run 67 - * on an 82489DX-based system. 68 - */ 69 - raw_spin_lock(&i8259A_lock); 70 - outb(0x0c, PIC_MASTER_OCW3); 71 - /* Ack the IRQ; AEOI will end it automatically. */ 72 - inb(PIC_MASTER_POLL); 73 - raw_spin_unlock(&i8259A_lock); 74 - } 75 65 76 66 global_clock_event->event_handler(global_clock_event); 77 67
-2
arch/x86/kernel/traps.c
··· 437 437 438 438 void stop_nmi(void) 439 439 { 440 - acpi_nmi_disable(); 441 440 ignore_nmis++; 442 441 } 443 442 444 443 void restart_nmi(void) 445 444 { 446 445 ignore_nmis--; 447 - acpi_nmi_enable(); 448 446 } 449 447 450 448 /* May run on IST stack. */
-3
arch/x86/oprofile/nmi_timer_int.c
··· 58 58 59 59 int __init op_nmi_timer_init(struct oprofile_operations *ops) 60 60 { 61 - if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0)) 62 - return -ENODEV; 63 - 64 61 ops->start = timer_start; 65 62 ops->stop = timer_stop; 66 63 ops->cpu_type = "timer";
-2
drivers/acpi/acpica/nsinit.c
··· 577 577 * as possible (without an NMI being received in the middle of 578 578 * this) - so disable NMIs and initialize the device: 579 579 */ 580 - acpi_nmi_disable(); 581 580 status = acpi_ns_evaluate(info); 582 - acpi_nmi_enable(); 583 581 584 582 if (ACPI_SUCCESS(status)) { 585 583 walk_info->num_INI++;
+1 -6
drivers/watchdog/hpwdt.c
··· 649 649 * If nmi_watchdog is turned off then we can turn on 650 650 * our nmi decoding capability. 651 651 */ 652 - if (!nmi_watchdog_active()) 653 - hpwdt_nmi_decoding = 1; 654 - else 655 - dev_warn(&dev->dev, "NMI decoding is disabled. To enable this " 656 - "functionality you must reboot with nmi_watchdog=0 " 657 - "and load the hpwdt driver with priority=1.\n"); 652 + hpwdt_nmi_decoding = 1; 658 653 } 659 654 #else 660 655 static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
-2
include/linux/nmi.h
··· 25 25 #else 26 26 extern void touch_nmi_watchdog(void); 27 27 #endif 28 - static inline void acpi_nmi_disable(void) { } 29 - static inline void acpi_nmi_enable(void) { } 30 28 31 29 /* 32 30 * Create trigger_all_cpu_backtrace() out of the arch-provided