Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: add X86_FEATURE_XMM4_2 definitions
  x86: fix cpufreq + sched_clock() regression
  x86: fix HPET regression in 2.6.26 versus 2.6.25, check hpet against BAR, v3
  x86: do not enable TSC notifier if we don't need it
  x86 MCE: Fix CPU hotplug problem with multiple multicore AMD CPUs
  x86: fix: make PCI ECS for AMD CPUs hotplug capable
  x86: fix: do not run code in amd_bus.c on non-AMD CPUs

 7 files changed, 142 insertions(+), 20 deletions(-)
arch/x86/kernel/cpu/mcheck/mce_64.c | +5

···
 };

 DEFINE_PER_CPU(struct sys_device, device_mce);
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;

 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \
···
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
                 mce_create_device(cpu);
+                if (threshold_cpu_callback)
+                        threshold_cpu_callback(action, cpu);
                 break;
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
+                if (threshold_cpu_callback)
+                        threshold_cpu_callback(action, cpu);
                 mce_remove_device(cpu);
                 break;
         }
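The pattern here: instead of the AMD threshold code registering its own CPU-hotplug notifier (whose ordering relative to the generic MCE notifier was not guaranteed), the generic handler now invokes an optional vendor hook at a fixed point, after mce_create_device() and before mce_remove_device(). A minimal user-space sketch of that hook pattern, with illustrative names and nothing beyond standard C:

#include <stdio.h>

/* optional vendor hook; NULL until a vendor module registers one */
static void (*threshold_hook)(unsigned long action, unsigned int cpu);

enum { HOOK_ONLINE, HOOK_DEAD };

static void generic_cpu_online(unsigned int cpu)
{
        printf("create generic device for cpu %u\n", cpu);
        if (threshold_hook)             /* vendor files only after the generic device exists */
                threshold_hook(HOOK_ONLINE, cpu);
}

static void generic_cpu_dead(unsigned int cpu)
{
        if (threshold_hook)             /* vendor files torn down before the generic device */
                threshold_hook(HOOK_DEAD, cpu);
        printf("remove generic device for cpu %u\n", cpu);
}

static void amd_hook(unsigned long action, unsigned int cpu)
{
        printf("AMD threshold hook: action %lu, cpu %u\n", action, cpu);
}

int main(void)
{
        generic_cpu_online(0);          /* no hook registered yet: generic path only */
        threshold_hook = amd_hook;      /* vendor init registers the hook */
        generic_cpu_online(1);
        generic_cpu_dead(1);
        return 0;
}

Because the generic handler owns both call sites, the ordering bug class disappears by construction rather than by notifier-priority tuning.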
arch/x86/kernel/cpu/mcheck/mce_amd_64.c | +5 -13

···
         deallocate_threshold_block(cpu, bank);

 free_out:
+        kobject_del(b->kobj);
         kobject_put(b->kobj);
         kfree(b);
         per_cpu(threshold_banks, cpu)[bank] = NULL;
···
 }

 /* get notified when a cpu comes on/off */
-static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
-                                            unsigned long action, void *hcpu)
+static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
+                                                    unsigned int cpu)
 {
-        /* cpu was unsigned int to begin with */
-        unsigned int cpu = (unsigned long)hcpu;
-
         if (cpu >= NR_CPUS)
-                goto out;
+                return;

         switch (action) {
         case CPU_ONLINE:
···
         default:
                 break;
         }
-out:
-        return NOTIFY_OK;
 }
-
-static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
-        .notifier_call = threshold_cpu_callback,
-};

 static __init int threshold_init_device(void)
 {
···
                 if (err)
                         return err;
         }
-        register_hotcpu_notifier(&threshold_cpu_notifier);
+        threshold_cpu_callback = amd_64_threshold_cpu_callback;
         return 0;
 }
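This hunk also adds kobject_del() before kobject_put(). put only drops a reference; if anyone else still holds one, the object stays linked in sysfs and blocks re-creation when the CPU comes back online, while del unlinks it immediately. A toy refcount sketch of why unlink and release must be separate steps (illustrative names, plain C, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct kobj_sketch {
        int refcount;
        int linked;             /* still visible in the tree? */
};

static void sketch_del(struct kobj_sketch *o)
{
        o->linked = 0;          /* unlink now, regardless of refcount */
}

static void sketch_put(struct kobj_sketch *o)
{
        if (--o->refcount == 0)
                free(o);        /* memory goes away only at refcount zero */
}

int main(void)
{
        struct kobj_sketch *o = malloc(sizeof(*o));

        o->refcount = 2;        /* someone else still holds a reference */
        o->linked = 1;

        sketch_put(o);          /* put alone: refcount 1, still linked! */
        printf("linked after put only: %d\n", o->linked);

        sketch_del(o);          /* del unlinks immediately... */
        printf("linked after del: %d\n", o->linked);
        sketch_put(o);          /* ...and the final put frees it */
        return 0;
}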
arch/x86/kernel/tsc.c | +5 -1

···
                 mark_tsc_unstable("cpufreq changes");
         }

-        set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
+        set_cyc2ns_scale(tsc_khz, freq->cpu);

         return 0;
 }
···

 static int __init cpufreq_tsc(void)
 {
+        if (!cpu_has_tsc)
+                return 0;
+        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+                return 0;
         cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                   CPUFREQ_TRANSITION_NOTIFIER);
         return 0;
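For context on the cpufreq + sched_clock() fix: sched_clock() converts TSC cycles to nanoseconds with a precomputed fixed-point scale, and the regression was recomputing that scale from the boot-time reference frequency (tsc_khz_ref) instead of the current tsc_khz after a frequency transition, so time ran at the wrong rate. A user-space sketch of the conversion, assuming the kernel's CYC2NS_SCALE_FACTOR of 10:

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10  /* fixed point: 2^10, ~0.1% resolution */

static uint64_t cyc2ns_scale;

static void set_scale(uint64_t cpu_khz)
{
        /* ns per cycle = 1e6 / khz, kept in fixed point */
        cyc2ns_scale = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static uint64_t cycles_to_ns(uint64_t cycles)
{
        return (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
        set_scale(2000000);     /* 2 GHz: one cycle is 0.5 ns */
        printf("%llu ns\n", (unsigned long long)cycles_to_ns(4000)); /* ~2000 */

        set_scale(1000000);     /* cpufreq dropped us to 1 GHz */
        printf("%llu ns\n", (unsigned long long)cycles_to_ns(4000)); /* ~4000 */
        return 0;
}

The second hunk is the "do not enable TSC notifier if we don't need it" commit: with no TSC, or with a constant-rate TSC, frequency transitions cannot change the cycle-to-ns ratio, so registering the notifier is pure overhead.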
arch/x86/pci/amd_bus.c | +46 -6

 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/topology.h>
+#include <linux/cpu.h>
 #include "pci.h"

 #ifdef CONFIG_X86_64
···
         return 0;
 }

-postcore_initcall(early_fill_mp_bus_info);
+#else /* !CONFIG_X86_64 */

-#endif
+static int __init early_fill_mp_bus_info(void) { return 0; }
+
+#endif /* !CONFIG_X86_64 */

 /* common 32/64 bit code */

 #define ENABLE_CF8_EXT_CFG      (1ULL << 46)

-static void enable_pci_io_ecs_per_cpu(void *unused)
+static void enable_pci_io_ecs(void *unused)
 {
         u64 reg;
         rdmsrl(MSR_AMD64_NB_CFG, reg);
···
         }
 }

-static int __init enable_pci_io_ecs(void)
+static int __cpuinit amd_cpu_notify(struct notifier_block *self,
+                                    unsigned long action, void *hcpu)
 {
+        int cpu = (long)hcpu;
+        switch (action) {
+        case CPU_ONLINE:
+        case CPU_ONLINE_FROZEN:
+                smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
+                break;
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata amd_cpu_notifier = {
+        .notifier_call = amd_cpu_notify,
+};
+
+static int __init pci_io_ecs_init(void)
+{
+        int cpu;
+
         /* assume all cpus from fam10h have IO ECS */
         if (boot_cpu_data.x86 < 0x10)
                 return 0;
-        on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1);
+
+        register_cpu_notifier(&amd_cpu_notifier);
+        for_each_online_cpu(cpu)
+                amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
+                               (void *)(long)cpu);
         pci_probe |= PCI_HAS_IO_ECS;
+
         return 0;
 }

-postcore_initcall(enable_pci_io_ecs);
+static int __init amd_postcore_init(void)
+{
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+                return 0;
+
+        early_fill_mp_bus_info();
+        pci_io_ecs_init();
+
+        return 0;
+}
+
+postcore_initcall(amd_postcore_init);
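Two things happen above: the single amd_postcore_init() entry point bails out on non-AMD CPUs before touching any AMD MSR, and the one-shot on_each_cpu() call is replaced by the register-then-replay hotplug idiom: register a notifier for CPUs that come online later, then walk the CPUs already online and apply the same setup by hand, so no CPU is missed in either direction. A user-space sketch of the idiom (all names illustrative):

#include <stdio.h>

#define NR_CPUS 4
static int cpu_online_mask[NR_CPUS] = { 1, 1, 0, 0 };
static void (*online_notifier)(int cpu);

static void per_cpu_setup(int cpu)
{
        printf("per-cpu setup (IO ECS equivalent) on cpu %d\n", cpu);
}

static void cpu_bring_online(int cpu)          /* the hotplug path */
{
        cpu_online_mask[cpu] = 1;
        if (online_notifier)
                online_notifier(cpu);
}

static void driver_init(void)
{
        int cpu;

        online_notifier = per_cpu_setup;       /* catches future hotplug events */
        for (cpu = 0; cpu < NR_CPUS; cpu++)    /* replay for already-online CPUs */
                if (cpu_online_mask[cpu])
                        per_cpu_setup(cpu);
}

int main(void)
{
        driver_init();          /* covers cpus 0 and 1 */
        cpu_bring_online(2);    /* covered by the notifier */
        return 0;
}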
arch/x86/pci/i386.c | +78

 #include <linux/ioport.h>
 #include <linux/errno.h>
 #include <linux/bootmem.h>
+#include <linux/acpi.h>

 #include <asm/pat.h>
+#include <asm/hpet.h>
+#include <asm/io_apic.h>

 #include "pci.h"
···
 }
 EXPORT_SYMBOL(pcibios_align_resource);

+static int check_res_with_valid(struct pci_dev *dev, struct resource *res)
+{
+        unsigned long base;
+        unsigned long size;
+        int i;
+
+        base = res->start;
+        size = (res->start == 0 && res->end == res->start) ? 0 :
+                (res->end - res->start + 1);
+
+        if (!base || !size)
+                return 0;
+
+#ifdef CONFIG_HPET_TIMER
+        /* for hpet */
+        if (base == hpet_address && (res->flags & IORESOURCE_MEM)) {
+                dev_info(&dev->dev, "BAR has HPET at %08lx-%08lx\n",
+                         base, base + size - 1);
+                return 1;
+        }
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+        for (i = 0; i < nr_ioapics; i++) {
+                unsigned long ioapic_phys = mp_ioapics[i].mp_apicaddr;
+
+                if (base == ioapic_phys && (res->flags & IORESOURCE_MEM)) {
+                        dev_info(&dev->dev, "BAR has ioapic at %08lx-%08lx\n",
+                                 base, base + size - 1);
+                        return 1;
+                }
+        }
+#endif
+
+#ifdef CONFIG_PCI_MMCONFIG
+        for (i = 0; i < pci_mmcfg_config_num; i++) {
+                unsigned long addr;
+
+                addr = pci_mmcfg_config[i].address;
+                if (base == addr && (res->flags & IORESOURCE_MEM)) {
+                        dev_info(&dev->dev, "BAR has MMCONFIG at %08lx-%08lx\n",
+                                 base, base + size - 1);
+                        return 1;
+                }
+        }
+#endif
+
+        return 0;
+}
+
+static int check_platform(struct pci_dev *dev, struct resource *res)
+{
+        struct resource *root = NULL;
+
+        /*
+         * forcibly insert it into the
+         * resource tree
+         */
+        if (res->flags & IORESOURCE_MEM)
+                root = &iomem_resource;
+        else if (res->flags & IORESOURCE_IO)
+                root = &ioport_resource;
+
+        if (root && check_res_with_valid(dev, res)) {
+                insert_resource(root, res);
+
+                return 1;
+        }
+
+        return 0;
+}
 /*
  * Handle resources of PCI devices. If the world were perfect, we could
  * just allocate all the resource regions and do nothing more. It isn't.
···
                         pr = pci_find_parent_resource(dev, r);
                         if (!r->start || !pr ||
                             request_resource(pr, r) < 0) {
+                                if (check_platform(dev, r))
+                                        continue;
                                 dev_err(&dev->dev, "BAR %d: can't "
                                         "allocate resource\n", idx);
                                 /*
···
                                 r->flags, disabled, pass);
                         pr = pci_find_parent_resource(dev, r);
                         if (!pr || request_resource(pr, r) < 0) {
+                                if (check_platform(dev, r))
+                                        continue;
                                 dev_err(&dev->dev, "BAR %d: can't "
                                         "allocate resource\n", idx);
                                 /* We'll assign a new address later */
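This is the HPET regression fix: a BAR that loses the normal allocation race is no longer reassigned blindly; if its base exactly matches a known platform address (HPET, IOAPIC, or MMCONFIG), it is force-inserted into the resource tree and left alone, since moving it would break the platform device behind it. A reduced user-space sketch of that whitelist check, with the three tables collapsed into one and made-up (though typical for x86) addresses:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

static const uint64_t known_platform_addrs[] = {
        0xfed00000,     /* a typical HPET base (example value) */
        0xfec00000,     /* a typical IOAPIC base (example value) */
};

static int matches_platform(const struct range *r)
{
        size_t i;

        if (!r->start || r->end < r->start)     /* reject empty/invalid BARs */
                return 0;
        for (i = 0; i < sizeof(known_platform_addrs) /
                        sizeof(known_platform_addrs[0]); i++)
                if (r->start == known_platform_addrs[i])
                        return 1;
        return 0;
}

int main(void)
{
        struct range bar = { 0xfed00000, 0xfed003ff };

        if (matches_platform(&bar))
                printf("keep BAR as-is, insert into resource tree\n");
        else
                printf("reallocate BAR later\n");
        return 0;
}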
include/asm-x86/cpufeature.h | +2

···
 #define X86_FEATURE_CX16        (4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR        (4*32+14) /* Send Task Priority Messages */
 #define X86_FEATURE_DCA         (4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_2      (4*32+20) /* Streaming SIMD Extensions-4.2 */

 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE      (5*32+ 2) /* on-CPU RNG present (xstore insn) */
···
 #define cpu_has_gbpages         boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat             boot_cpu_has(X86_FEATURE_PAT)
+#define cpu_has_xmm4_2          boot_cpu_has(X86_FEATURE_XMM4_2)

 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg         1
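Word 4 of the kernel's feature array maps to CPUID leaf 1, ECX, so X86_FEATURE_XMM4_2 at (4*32+20) is ECX bit 20, the SSE4.2 bit. The same bit can be checked from user space with the GCC/clang cpuid.h wrapper:

#include <stdio.h>
#include <cpuid.h>      /* GCC/clang builtin CPUID wrapper */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;                       /* leaf 1 unsupported */
        printf("SSE4.2: %s\n", (ecx & (1u << 20)) ? "yes" : "no");
        return 0;
}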
include/asm-x86/mce.h | +1

···

 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);