Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: add X86_FEATURE_XMM4_2 definitions
x86: fix cpufreq + sched_clock() regression
x86: fix HPET regression in 2.6.26 versus 2.6.25, check hpet against BAR, v3
x86: do not enable TSC notifier if we don't need it
x86 MCE: Fix CPU hotplug problem with multiple multicore AMD CPUs
x86: fix: make PCI ECS for AMD CPUs hotplug capable
x86: fix: do not run code in amd_bus.c on non-AMD CPUs

+142 -20
+5
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 759 759 }; 760 760 761 761 DEFINE_PER_CPU(struct sys_device, device_mce); 762 + void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata; 762 763 763 764 /* Why are there no generic functions for this? */ 764 765 #define ACCESSOR(name, var, start) \ ··· 884 883 case CPU_ONLINE: 885 884 case CPU_ONLINE_FROZEN: 886 885 mce_create_device(cpu); 886 + if (threshold_cpu_callback) 887 + threshold_cpu_callback(action, cpu); 887 888 break; 888 889 case CPU_DEAD: 889 890 case CPU_DEAD_FROZEN: 891 + if (threshold_cpu_callback) 892 + threshold_cpu_callback(action, cpu); 890 893 mce_remove_device(cpu); 891 894 break; 892 895 }
+5 -13
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
··· 628 628 deallocate_threshold_block(cpu, bank); 629 629 630 630 free_out: 631 + kobject_del(b->kobj); 631 632 kobject_put(b->kobj); 632 633 kfree(b); 633 634 per_cpu(threshold_banks, cpu)[bank] = NULL; ··· 646 645 } 647 646 648 647 /* get notified when a cpu comes on/off */ 649 - static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, 650 - unsigned long action, void *hcpu) 648 + static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action, 649 + unsigned int cpu) 651 650 { 652 - /* cpu was unsigned int to begin with */ 653 - unsigned int cpu = (unsigned long)hcpu; 654 - 655 651 if (cpu >= NR_CPUS) 656 - goto out; 652 + return; 657 653 658 654 switch (action) { 659 655 case CPU_ONLINE: ··· 664 666 default: 665 667 break; 666 668 } 667 - out: 668 - return NOTIFY_OK; 669 669 } 670 - 671 - static struct notifier_block threshold_cpu_notifier __cpuinitdata = { 672 - .notifier_call = threshold_cpu_callback, 673 - }; 674 670 675 671 static __init int threshold_init_device(void) 676 672 { ··· 676 684 if (err) 677 685 return err; 678 686 } 679 - register_hotcpu_notifier(&threshold_cpu_notifier); 687 + threshold_cpu_callback = amd_64_threshold_cpu_callback; 680 688 return 0; 681 689 } 682 690
+5 -1
arch/x86/kernel/tsc.c
··· 314 314 mark_tsc_unstable("cpufreq changes"); 315 315 } 316 316 317 - set_cyc2ns_scale(tsc_khz_ref, freq->cpu); 317 + set_cyc2ns_scale(tsc_khz, freq->cpu); 318 318 319 319 return 0; 320 320 } ··· 325 325 326 326 static int __init cpufreq_tsc(void) 327 327 { 328 + if (!cpu_has_tsc) 329 + return 0; 330 + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 331 + return 0; 328 332 cpufreq_register_notifier(&time_cpufreq_notifier_block, 329 333 CPUFREQ_TRANSITION_NOTIFIER); 330 334 return 0;
+46 -6
arch/x86/pci/amd_bus.c
··· 1 1 #include <linux/init.h> 2 2 #include <linux/pci.h> 3 3 #include <linux/topology.h> 4 + #include <linux/cpu.h> 4 5 #include "pci.h" 5 6 6 7 #ifdef CONFIG_X86_64 ··· 556 555 return 0; 557 556 } 558 557 559 - postcore_initcall(early_fill_mp_bus_info); 558 + #else /* !CONFIG_X86_64 */ 560 559 561 - #endif 560 + static int __init early_fill_mp_bus_info(void) { return 0; } 561 + 562 + #endif /* !CONFIG_X86_64 */ 562 563 563 564 /* common 32/64 bit code */ 564 565 565 566 #define ENABLE_CF8_EXT_CFG (1ULL << 46) 566 567 567 - static void enable_pci_io_ecs_per_cpu(void *unused) 568 + static void enable_pci_io_ecs(void *unused) 568 569 { 569 570 u64 reg; 570 571 rdmsrl(MSR_AMD64_NB_CFG, reg); ··· 576 573 } 577 574 } 578 575 579 - static int __init enable_pci_io_ecs(void) 576 + static int __cpuinit amd_cpu_notify(struct notifier_block *self, 577 + unsigned long action, void *hcpu) 580 578 { 579 + int cpu = (long)hcpu; 580 + switch(action) { 581 + case CPU_ONLINE: 582 + case CPU_ONLINE_FROZEN: 583 + smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); 584 + break; 585 + default: 586 + break; 587 + } 588 + return NOTIFY_OK; 589 + } 590 + 591 + static struct notifier_block __cpuinitdata amd_cpu_notifier = { 592 + .notifier_call = amd_cpu_notify, 593 + }; 594 + 595 + static int __init pci_io_ecs_init(void) 596 + { 597 + int cpu; 598 + 581 599 /* assume all cpus from fam10h have IO ECS */ 582 600 if (boot_cpu_data.x86 < 0x10) 583 601 return 0; 584 - on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1); 602 + 603 + register_cpu_notifier(&amd_cpu_notifier); 604 + for_each_online_cpu(cpu) 605 + amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, 606 + (void *)(long)cpu); 585 607 pci_probe |= PCI_HAS_IO_ECS; 608 + 586 609 return 0; 587 610 } 588 611 589 - postcore_initcall(enable_pci_io_ecs); 612 + static int __init amd_postcore_init(void) 613 + { 614 + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 615 + return 0; 616 + 617 + early_fill_mp_bus_info(); 618 + pci_io_ecs_init(); 619 + 620 + return 0; 621 + } 622 + 623 + postcore_initcall(amd_postcore_init);
+78
arch/x86/pci/i386.c
··· 31 31 #include <linux/ioport.h> 32 32 #include <linux/errno.h> 33 33 #include <linux/bootmem.h> 34 + #include <linux/acpi.h> 34 35 35 36 #include <asm/pat.h> 37 + #include <asm/hpet.h> 38 + #include <asm/io_apic.h> 36 39 37 40 #include "pci.h" 38 41 ··· 80 77 } 81 78 EXPORT_SYMBOL(pcibios_align_resource); 82 79 80 + static int check_res_with_valid(struct pci_dev *dev, struct resource *res) 81 + { 82 + unsigned long base; 83 + unsigned long size; 84 + int i; 85 + 86 + base = res->start; 87 + size = (res->start == 0 && res->end == res->start) ? 0 : 88 + (res->end - res->start + 1); 89 + 90 + if (!base || !size) 91 + return 0; 92 + 93 + #ifdef CONFIG_HPET_TIMER 94 + /* for hpet */ 95 + if (base == hpet_address && (res->flags & IORESOURCE_MEM)) { 96 + dev_info(&dev->dev, "BAR has HPET at %08lx-%08lx\n", 97 + base, base + size - 1); 98 + return 1; 99 + } 100 + #endif 101 + 102 + #ifdef CONFIG_X86_IO_APIC 103 + for (i = 0; i < nr_ioapics; i++) { 104 + unsigned long ioapic_phys = mp_ioapics[i].mp_apicaddr; 105 + 106 + if (base == ioapic_phys && (res->flags & IORESOURCE_MEM)) { 107 + dev_info(&dev->dev, "BAR has ioapic at %08lx-%08lx\n", 108 + base, base + size - 1); 109 + return 1; 110 + } 111 + } 112 + #endif 113 + 114 + #ifdef CONFIG_PCI_MMCONFIG 115 + for (i = 0; i < pci_mmcfg_config_num; i++) { 116 + unsigned long addr; 117 + 118 + addr = pci_mmcfg_config[i].address; 119 + if (base == addr && (res->flags & IORESOURCE_MEM)) { 120 + dev_info(&dev->dev, "BAR has MMCONFIG at %08lx-%08lx\n", 121 + base, base + size - 1); 122 + return 1; 123 + } 124 + } 125 + #endif 126 + 127 + return 0; 128 + } 129 + 130 + static int check_platform(struct pci_dev *dev, struct resource *res) 131 + { 132 + struct resource *root = NULL; 133 + 134 + /* 135 + * forcibly insert it into the 136 + * resource tree 137 + */ 138 + if (res->flags & IORESOURCE_MEM) 139 + root = &iomem_resource; 140 + else if (res->flags & IORESOURCE_IO) 141 + root = &ioport_resource; 142 + 143 + if (root && check_res_with_valid(dev, res)) { 144 + insert_resource(root, res); 145 + 146 + return 1; 147 + } 148 + 149 + return 0; 150 + } 83 151 /* 84 152 * Handle resources of PCI devices. If the world were perfect, we could 85 153 * just allocate all the resource regions and do nothing more. It isn't.
+2
include/asm-x86/cpufeature.h
··· 91 91 #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ 92 92 #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ 93 93 #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ 94 + #define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */ 94 95 95 96 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 96 97 #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ ··· 190 189 #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 191 190 #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) 192 191 #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) 192 + #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) 193 193 194 194 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) 195 195 # define cpu_has_invlpg 1
+1
include/asm-x86/mce.h
··· 92 92 93 93 void mce_log(struct mce *m); 94 94 DECLARE_PER_CPU(struct sys_device, device_mce); 95 + extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 95 96 96 97 #ifdef CONFIG_X86_MCE_INTEL 97 98 void mce_intel_feature_init(struct cpuinfo_x86 *c);