Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
x86: simplify "make ARCH=x86" and fix kconfig all.config
x86: reboot fixup for wrap2c board
x86: check boundary in count setup resource
x86: fix reboot with no keyboard attached
x86: add hpet sanity checks
x86: on x86_64, correct reading of PC RTC when update in progress in time_64.c
x86: fix freeze in x86_64 RTC update code in time_64.c
ntp: fix typo that makes sync_cmos_clock erratic
Remove x86 merge artifact from top Makefile
x86: fixup cpu_info array conversion
x86: show cpuinfo only for online CPUs
x86: fix cpu-hotplug regression
x86: ignore the sys_getcpu() tcache parameter
x86: voyager use correct header file name
x86: fix smp init sections
x86: fix voyager_cat_init section
x86: fix bogus memcpy in es7000_check_dsdt()

+81 -82
+1 -6
Makefile
··· 1332 1332 ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS) 1333 1333 endif 1334 1334 1335 - # Take care of arch/x86 1336 - ifeq ($(ARCH), $(SRCARCH)) 1337 - ALLSOURCE_ARCHS := $(ARCH) 1338 - else 1339 - ALLSOURCE_ARCHS := $(ARCH) $(SRCARCH) 1340 - endif 1335 + ALLSOURCE_ARCHS := $(SRCARCH) 1341 1336 1342 1337 define find-sources 1343 1338 ( for arch in $(ALLSOURCE_ARCHS) ; do \
+32
arch/x86/kernel/acpi/boot.c
··· 637 637 } 638 638 639 639 hpet_address = hpet_tbl->address.address; 640 + 641 + /* 642 + * Some broken BIOSes advertise HPET at 0x0. We really do not 643 + * want to allocate a resource there. 644 + */ 645 + if (!hpet_address) { 646 + printk(KERN_WARNING PREFIX 647 + "HPET id: %#x base: %#lx is invalid\n", 648 + hpet_tbl->id, hpet_address); 649 + return 0; 650 + } 651 + #ifdef CONFIG_X86_64 652 + /* 653 + * Some even more broken BIOSes advertise HPET at 654 + * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add 655 + * some noise: 656 + */ 657 + if (hpet_address == 0xfed0000000000000UL) { 658 + if (!hpet_force_user) { 659 + printk(KERN_WARNING PREFIX "HPET id: %#x " 660 + "base: 0xfed0000000000000 is bogus\n " 661 + "try hpet=force on the kernel command line to " 662 + "fix it up to 0xfed00000.\n", hpet_tbl->id); 663 + hpet_address = 0; 664 + return 0; 665 + } 666 + printk(KERN_WARNING PREFIX 667 + "HPET id: %#x base: 0xfed0000000000000 fixed up " 668 + "to 0xfed00000.\n", hpet_tbl->id); 669 + hpet_address >>= 32; 670 + } 671 + #endif 640 672 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 641 673 hpet_tbl->id, hpet_address); 642 674
+1 -1
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 810 810 int err; 811 811 int i; 812 812 813 - if (!mce_available(&cpu_data(cpu))) 813 + if (!mce_available(&boot_cpu_data)) 814 814 return -EIO; 815 815 816 816 memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
+3 -5
arch/x86/kernel/cpu/proc.c
··· 89 89 int fpu_exception; 90 90 91 91 #ifdef CONFIG_SMP 92 - if (!cpu_online(n)) 93 - return 0; 94 92 n = c->cpu_index; 95 93 #endif 96 94 seq_printf(m, "processor\t: %d\n" ··· 175 177 static void *c_start(struct seq_file *m, loff_t *pos) 176 178 { 177 179 if (*pos == 0) /* just in case, cpu 0 is not the first */ 178 - *pos = first_cpu(cpu_possible_map); 179 - if ((*pos) < NR_CPUS && cpu_possible(*pos)) 180 + *pos = first_cpu(cpu_online_map); 181 + if ((*pos) < NR_CPUS && cpu_online(*pos)) 180 182 return &cpu_data(*pos); 181 183 return NULL; 182 184 } 183 185 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 184 186 { 185 - *pos = next_cpu(*pos, cpu_possible_map); 187 + *pos = next_cpu(*pos, cpu_online_map); 186 188 return c_start(m, pos); 187 189 } 188 190 static void c_stop(struct seq_file *m, void *v)
+1
arch/x86/kernel/reboot_fixups_32.c
··· 39 39 static struct device_fixup fixups_table[] = { 40 40 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset }, 41 41 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset }, 42 + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset }, 42 43 }; 43 44 44 45 /*
+3 -6
arch/x86/kernel/setup_64.c
··· 892 892 893 893 #ifdef CONFIG_SMP 894 894 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; 895 - c->cpu_index = 0; 896 895 #endif 897 896 } 898 897 ··· 1077 1078 1078 1079 1079 1080 #ifdef CONFIG_SMP 1080 - if (!cpu_online(c->cpu_index)) 1081 - return 0; 1082 1081 cpu = c->cpu_index; 1083 1082 #endif 1084 1083 ··· 1168 1171 static void *c_start(struct seq_file *m, loff_t *pos) 1169 1172 { 1170 1173 if (*pos == 0) /* just in case, cpu 0 is not the first */ 1171 - *pos = first_cpu(cpu_possible_map); 1172 - if ((*pos) < NR_CPUS && cpu_possible(*pos)) 1174 + *pos = first_cpu(cpu_online_map); 1175 + if ((*pos) < NR_CPUS && cpu_online(*pos)) 1173 1176 return &cpu_data(*pos); 1174 1177 return NULL; 1175 1178 } 1176 1179 1177 1180 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1178 1181 { 1179 - *pos = next_cpu(*pos, cpu_possible_map); 1182 + *pos = next_cpu(*pos, cpu_online_map); 1180 1183 return c_start(m, pos); 1181 1184 } 1182 1185
+22 -19
arch/x86/kernel/time_64.c
··· 82 82 int retval = 0; 83 83 int real_seconds, real_minutes, cmos_minutes; 84 84 unsigned char control, freq_select; 85 + unsigned long flags; 85 86 86 87 /* 87 - * IRQs are disabled when we're called from the timer interrupt, 88 - * no need for spin_lock_irqsave() 88 89 */ 90 - 91 - spin_lock(&rtc_lock); 92 - 90 + spin_lock_irqsave(&rtc_lock, flags); 93 91 /* 94 92 * Tell the clock it's being set and stop it. 95 93 */ 96 - 97 94 control = CMOS_READ(RTC_CONTROL); 98 95 CMOS_WRITE(control | RTC_SET, RTC_CONTROL); 99 96 ··· 135 138 CMOS_WRITE(control, RTC_CONTROL); 136 139 CMOS_WRITE(freq_select, RTC_FREQ_SELECT); 137 140 138 - spin_unlock(&rtc_lock); 141 + spin_unlock_irqrestore(&rtc_lock, flags); 139 142 140 143 return retval; 141 144 } ··· 161 164 unsigned century = 0; 162 165 163 166 spin_lock_irqsave(&rtc_lock, flags); 167 + /* 168 + * if UIP is clear, then we have >= 244 microseconds before RTC 169 + * registers will be updated. Spec sheet says that this is the 170 + * reliable way to read RTC - registers invalid (off bus) during update 171 + */ 172 + while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) 173 + cpu_relax(); 164 174 165 - do { 166 - sec = CMOS_READ(RTC_SECONDS); 167 - min = CMOS_READ(RTC_MINUTES); 168 - hour = CMOS_READ(RTC_HOURS); 169 - day = CMOS_READ(RTC_DAY_OF_MONTH); 170 - mon = CMOS_READ(RTC_MONTH); 171 - year = CMOS_READ(RTC_YEAR); 175 + 176 + /* now read all RTC registers while stable with interrupts disabled */ 177 + sec = CMOS_READ(RTC_SECONDS); 178 + min = CMOS_READ(RTC_MINUTES); 179 + hour = CMOS_READ(RTC_HOURS); 180 + day = CMOS_READ(RTC_DAY_OF_MONTH); 181 + mon = CMOS_READ(RTC_MONTH); 182 + year = CMOS_READ(RTC_YEAR); 172 183 #ifdef CONFIG_ACPI 173 - if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && 174 - acpi_gbl_FADT.century) 175 - century = CMOS_READ(acpi_gbl_FADT.century); 184 + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && 185 + acpi_gbl_FADT.century) 186 + century = CMOS_READ(acpi_gbl_FADT.century); 176 187 #endif 177 - } while (sec != CMOS_READ(RTC_SECONDS)); 178 - 179 188 spin_unlock_irqrestore(&rtc_lock, flags); 180 189 181 190 /*
+1 -1
arch/x86/mach-voyager/voyager_cat.c
··· 568 568 * boot cpu *after* all memory initialisation has been done (so we can 569 569 * use kmalloc) but before smp initialisation, so we can probe the SMP 570 570 * configuration and pick up necessary information. */ 571 - void 571 + void __init 572 572 voyager_cat_init(void) 573 573 { 574 574 voyager_module_t **modpp = &voyager_initial_module;
+2 -2
arch/x86/mach-voyager/voyager_smp.c
··· 1900 1900 smp_boot_cpus(); 1901 1901 } 1902 1902 1903 - static void __devinit voyager_smp_prepare_boot_cpu(void) 1903 + static void __cpuinit voyager_smp_prepare_boot_cpu(void) 1904 1904 { 1905 1905 init_gdt(smp_processor_id()); 1906 1906 switch_to_new_gdt(); ··· 1911 1911 cpu_set(smp_processor_id(), cpu_present_map); 1912 1912 } 1913 1913 1914 - static int __devinit 1914 + static int __cpuinit 1915 1915 voyager_cpu_up(unsigned int cpu) 1916 1916 { 1917 1917 /* This only works at boot for x86. See "rewrite" above. */
+6
arch/x86/pci/acpi.c
··· 77 77 struct acpi_resource_address64 addr; 78 78 acpi_status status; 79 79 80 + if (info->res_num >= PCI_BUS_NUM_RESOURCES) 81 + return AE_OK; 82 + 80 83 status = resource_to_addr(acpi_res, &addr); 81 84 if (ACPI_SUCCESS(status)) 82 85 info->res_num++; ··· 95 92 acpi_status status; 96 93 unsigned long flags; 97 94 struct resource *root; 95 + 96 + if (info->res_num >= PCI_BUS_NUM_RESOURCES) 97 + return AE_OK; 98 98 99 99 status = resource_to_addr(acpi_res, &addr); 100 100 if (!ACPI_SUCCESS(status))
+2 -17
arch/x86/vdso/vgetcpu.c
··· 13 13 #include <asm/vgtod.h> 14 14 #include "vextern.h" 15 15 16 - long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) 16 + long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) 17 17 { 18 18 unsigned int dummy, p; 19 - unsigned long j = 0; 20 19 21 - /* Fast cache - only recompute value once per jiffies and avoid 22 - relatively costly rdtscp/cpuid otherwise. 23 - This works because the scheduler usually keeps the process 24 - on the same CPU and this syscall doesn't guarantee its 25 - results anyways. 26 - We do this here because otherwise user space would do it on 27 - its own in a likely inferior way (no access to jiffies). 28 - If you don't like it pass NULL. */ 29 - if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) { 30 - p = tcache->blob[1]; 31 - } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) { 20 + if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) { 32 21 /* Load per CPU data from RDTSCP */ 33 22 rdtscp(dummy, dummy, p); 34 23 } else { 35 24 /* Load per CPU data from GDT */ 36 25 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); 37 - } 38 - if (tcache) { 39 - tcache->blob[0] = j; 40 - tcache->blob[1] = p; 41 26 } 42 27 if (cpu) 43 28 *cpu = p & 0xfff;
+1 -1
include/asm-x86/mach-default/mach_reboot.h
··· 49 49 udelay(50); 50 50 kb_wait(); 51 51 udelay(50); 52 - outb(cmd | 0x04, 0x60); /* set "System flag" */ 52 + outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ 53 53 udelay(50); 54 54 kb_wait(); 55 55 udelay(50);
+3 -3
include/asm-x86/mach-es7000/mach_mpparse.h
··· 29 29 static inline int es7000_check_dsdt(void) 30 30 { 31 31 struct acpi_table_header header; 32 - memcpy(&header, 0, sizeof(struct acpi_table_header)); 33 - acpi_get_table_header(ACPI_SIG_DSDT, 0, &header); 34 - if (!strncmp(header.oem_id, "UNISYS", 6)) 32 + 33 + if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) && 34 + !strncmp(header.oem_id, "UNISYS", 6)) 35 35 return 1; 36 36 return 0; 37 37 }
+1 -1
include/asm-x86/mach-voyager/setup_arch.h
··· 1 1 #include <asm/voyager.h> 2 - #include <asm/setup_32.h> 2 + #include <asm/setup.h> 3 3 #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \ 4 4 (&boot_params.apm_bios_info)) 5 5
+1 -19
kernel/sys.c
··· 1750 1750 } 1751 1751 1752 1752 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, 1753 - struct getcpu_cache __user *cache) 1753 + struct getcpu_cache __user *unused) 1754 1754 { 1755 1755 int err = 0; 1756 1756 int cpu = raw_smp_processor_id(); ··· 1758 1758 err |= put_user(cpu, cpup); 1759 1759 if (nodep) 1760 1760 err |= put_user(cpu_to_node(cpu), nodep); 1761 - if (cache) { 1762 - /* 1763 - * The cache is not needed for this implementation, 1764 - * but make sure user programs pass something 1765 - * valid. vsyscall implementations can instead make 1766 - * good use of the cache. Only use t0 and t1 because 1767 - * these are available in both 32bit and 64bit ABI (no 1768 - * need for a compat_getcpu). 32bit has enough 1769 - * padding 1770 - */ 1771 - unsigned long t0, t1; 1772 - get_user(t0, &cache->blob[0]); 1773 - get_user(t1, &cache->blob[1]); 1774 - t0++; 1775 - t1++; 1776 - put_user(t0, &cache->blob[0]); 1777 - put_user(t1, &cache->blob[1]); 1778 - } 1779 1761 return err ? -EFAULT : 0; 1780 1762 } 1781 1763
+1 -1
kernel/time/ntp.c
··· 205 205 return; 206 206 207 207 getnstimeofday(&now); 208 - if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 208 + if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 209 209 fail = update_persistent_clock(now); 210 210 211 211 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;