Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
[PATCH] x86-64: Fix race in exit_idle
[PATCH] x86-64: Fix vgetcpu when CONFIG_HOTPLUG_CPU is disabled
[PATCH] x86: Add acpi_user_timer_override option for Asus boards
[PATCH] x86-64: setup saved_max_pfn correctly (kdump)
[PATCH] x86-64: Handle reserve_bootmem_generic beyond end_pfn
[PATCH] x86-64: shorten the x86_64 boot setup GDT to what the comment says
[PATCH] x86-64: Fix PTRACE_[SG]ET_THREAD_AREA regression with ia32 emulation.
[PATCH] x86-64: Fix partial page check to ensure unusable memory is not being marked usable.
Revert "[PATCH] MMCONFIG and new Intel motherboards"

+86 -75
+4
Documentation/kernel-parameters.txt
··· 164 164 acpi_skip_timer_override [HW,ACPI] 165 165 Recognize and ignore IRQ0/pin2 Interrupt Override. 166 166 For broken nForce2 BIOS resulting in XT-PIC timer. 167 + acpi_use_timer_override [HW,ACPI] 168 + Use timer override. For some broken Nvidia NF5 boards 169 + that require a timer override, but don't have 170 + HPET 167 171 168 172 acpi_dbg_layer= [HW,ACPI] 169 173 Format: <int>
+8
arch/i386/kernel/acpi/boot.c
··· 82 82 acpi_interrupt_flags acpi_sci_flags __initdata; 83 83 int acpi_sci_override_gsi __initdata; 84 84 int acpi_skip_timer_override __initdata; 85 + int acpi_use_timer_override __initdata; 85 86 86 87 #ifdef CONFIG_X86_LOCAL_APIC 87 88 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; ··· 1301 1300 return 0; 1302 1301 } 1303 1302 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); 1303 + 1304 + static int __init parse_acpi_use_timer_override(char *arg) 1305 + { 1306 + acpi_use_timer_override = 1; 1307 + return 0; 1308 + } 1309 + early_param("acpi_use_timer_override", parse_acpi_use_timer_override); 1304 1310 #endif /* CONFIG_X86_IO_APIC */ 1305 1311 1306 1312 static int __init setup_acpi_sci(char *s)
+7 -1
arch/i386/kernel/acpi/earlyquirk.c
··· 27 27 #ifdef CONFIG_ACPI 28 28 /* According to Nvidia all timer overrides are bogus unless HPET 29 29 is enabled. */ 30 - if (vendor == PCI_VENDOR_ID_NVIDIA) { 30 + if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { 31 31 nvidia_hpet_detected = 0; 32 32 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 33 33 if (nvidia_hpet_detected == 0) { 34 34 acpi_skip_timer_override = 1; 35 + printk(KERN_INFO "Nvidia board " 36 + "detected. Ignoring ACPI " 37 + "timer override.\n"); 38 + printk(KERN_INFO "If you got timer trouble " 39 + "try acpi_use_timer_override\n"); 40 + 35 41 } 36 42 } 37 43 #endif
+2 -3
arch/x86_64/boot/setup.S
··· 836 836 .word 0x9200 # data read/write 837 837 .word 0x00CF # granularity = 4096, 386 838 838 # (+5th nibble of limit) 839 + gdt_end: 839 840 idt_48: 840 841 .word 0 # idt limit = 0 841 842 .word 0, 0 # idt base = 0L 842 843 gdt_48: 843 - .word 0x8000 # gdt limit=2048, 844 - # 256 GDT entries 845 - 844 + .word gdt_end-gdt-1 # gdt limit 846 845 .word 0, 0 # gdt base (filled in later) 847 846 848 847 # Include video setup & detection code
+2
arch/x86_64/ia32/ptrace32.c
··· 244 244 case PTRACE_DETACH: 245 245 case PTRACE_SYSCALL: 246 246 case PTRACE_SETOPTIONS: 247 + case PTRACE_SET_THREAD_AREA: 248 + case PTRACE_GET_THREAD_AREA: 247 249 return sys_ptrace(request, pid, addr, data); 248 250 249 251 default:
+3 -1
arch/x86_64/kernel/e820.c
··· 278 278 >> PAGE_SHIFT; 279 279 280 280 /* Skip map entries smaller than a page */ 281 - if (ei_startpfn > ei_endpfn) 281 + if (ei_startpfn >= ei_endpfn) 282 282 continue; 283 283 284 284 /* Check if end_pfn_map should be updated */ ··· 594 594 * size before original memory map is 595 595 * reset. 596 596 */ 597 + e820_register_active_regions(0, 0, -1UL); 597 598 saved_max_pfn = e820_end_of_ram(); 599 + remove_all_active_ranges(); 598 600 #endif 599 601 end_pfn_map = 0; 600 602 e820.nr_map = 0;
+8
arch/x86_64/kernel/early-quirks.c
··· 45 45 /* 46 46 * All timer overrides on Nvidia are 47 47 * wrong unless HPET is enabled. 48 + * Unfortunately that's not true on many Asus boards. 49 + * We don't know yet how to detect this automatically, but 50 + * at least allow a command line override. 48 51 */ 52 + if (acpi_use_timer_override) 53 + return; 54 + 49 55 nvidia_hpet_detected = 0; 50 56 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 51 57 if (nvidia_hpet_detected == 0) { ··· 59 53 printk(KERN_INFO "Nvidia board " 60 54 "detected. Ignoring ACPI " 61 55 "timer override.\n"); 56 + printk(KERN_INFO "If you got timer trouble " 57 + "try acpi_use_timer_override\n"); 62 58 } 63 59 #endif 64 60 /* RED-PEN skip them on mptables too? */
+1 -2
arch/x86_64/kernel/process.c
··· 88 88 89 89 static void __exit_idle(void) 90 90 { 91 - if (read_pda(isidle) == 0) 91 + if (test_and_clear_bit_pda(0, isidle) == 0) 92 92 return; 93 - write_pda(isidle, 0); 94 93 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 95 94 } 96 95
+1 -2
arch/x86_64/kernel/smp.c
··· 376 376 /* prevent preemption and reschedule on another processor */ 377 377 int me = get_cpu(); 378 378 if (cpu == me) { 379 - WARN_ON(1); 380 379 put_cpu(); 381 - return -EBUSY; 380 + return 0; 382 381 } 383 382 spin_lock_bh(&call_lock); 384 383 __smp_call_function_single(cpu, func, info, nonatomic, wait);
-11
arch/x86_64/kernel/time.c
··· 876 876 timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL 877 877 }; 878 878 879 - static int __cpuinit 880 - time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu) 881 - { 882 - unsigned cpu = (unsigned long) hcpu; 883 - if (action == CPU_ONLINE) 884 - vsyscall_set_cpu(cpu); 885 - return NOTIFY_DONE; 886 - } 887 - 888 879 void __init time_init(void) 889 880 { 890 881 if (nohpet) ··· 916 925 vxtime.last_tsc = get_cycles_sync(); 917 926 set_cyc2ns_scale(cpu_khz); 918 927 setup_irq(0, &irq0); 919 - hotcpu_notifier(time_cpu_notifier, 0); 920 - time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id()); 921 928 922 929 #ifndef CONFIG_SMP 923 930 time_init_gtod();
+25 -20
arch/x86_64/kernel/vsyscall.c
··· 27 27 #include <linux/jiffies.h> 28 28 #include <linux/sysctl.h> 29 29 #include <linux/getcpu.h> 30 + #include <linux/cpu.h> 31 + #include <linux/smp.h> 32 + #include <linux/notifier.h> 30 33 31 34 #include <asm/vsyscall.h> 32 35 #include <asm/pgtable.h> ··· 246 243 247 244 #endif 248 245 249 - static void __cpuinit write_rdtscp_cb(void *info) 250 - { 251 - write_rdtscp_aux((unsigned long)info); 252 - } 253 - 254 - void __cpuinit vsyscall_set_cpu(int cpu) 246 + /* Assume __initcall executes before all user space. Hopefully kmod 247 + doesn't violate that. We'll find out if it does. */ 248 + static void __cpuinit vsyscall_set_cpu(int cpu) 255 249 { 256 250 unsigned long *d; 257 251 unsigned long node = 0; 258 252 #ifdef CONFIG_NUMA 259 253 node = cpu_to_node[cpu]; 260 254 #endif 261 - if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) { 262 - void *info = (void *)((node << 12) | cpu); 263 - /* Can happen on preemptive kernel */ 264 - if (get_cpu() == cpu) 265 - write_rdtscp_cb(info); 266 - #ifdef CONFIG_SMP 267 - else { 268 - /* the notifier is unfortunately not executed on the 269 - target CPU */ 270 - smp_call_function_single(cpu,write_rdtscp_cb,info,0,1); 271 - } 272 - #endif 273 - put_cpu(); 274 - } 255 + if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) 256 + write_rdtscp_aux((node << 12) | cpu); 275 257 276 258 /* Store cpu number in limit so that it can be loaded quickly 277 259 in user space in vgetcpu. 
··· 266 278 *d |= cpu; 267 279 *d |= (node & 0xf) << 12; 268 280 *d |= (node >> 4) << 48; 281 + } 282 + 283 + static void __cpuinit cpu_vsyscall_init(void *arg) 284 + { 285 + /* preemption should be already off */ 286 + vsyscall_set_cpu(raw_smp_processor_id()); 287 + } 288 + 289 + static int __cpuinit 290 + cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 291 + { 292 + long cpu = (long)arg; 293 + if (action == CPU_ONLINE) 294 + smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); 295 + return NOTIFY_DONE; 269 296 } 270 297 271 298 static void __init map_vsyscall(void) ··· 302 299 #ifdef CONFIG_SYSCTL 303 300 register_sysctl_table(kernel_root_table2, 0); 304 301 #endif 302 + on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); 303 + hotcpu_notifier(cpu_vsyscall_notifier, 0); 305 304 return 0; 306 305 } 307 306
+14 -1
arch/x86_64/mm/init.c
··· 655 655 656 656 void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 657 657 { 658 - /* Should check here against the e820 map to avoid double free */ 659 658 #ifdef CONFIG_NUMA 660 659 int nid = phys_to_nid(phys); 660 + #endif 661 + unsigned long pfn = phys >> PAGE_SHIFT; 662 + if (pfn >= end_pfn) { 663 + /* This can happen with kdump kernels when accessing firmware 664 + tables. */ 665 + if (pfn < end_pfn_map) 666 + return; 667 + printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n", 668 + phys, len); 669 + return; 670 + } 671 + 672 + /* Should check here against the e820 map to avoid double free */ 673 + #ifdef CONFIG_NUMA 661 674 reserve_bootmem_node(NODE_DATA(nid), phys, len); 662 675 #else 663 676 reserve_bootmem(phys, len);
-32
arch/x86_64/pci/mmconfig.c
··· 163 163 } 164 164 } 165 165 166 - static __init void pci_mmcfg_insert_resources(void) 167 - { 168 - #define PCI_MMCFG_RESOURCE_NAME_LEN 19 169 - int i; 170 - struct resource *res; 171 - char *names; 172 - unsigned num_buses; 173 - 174 - res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res), 175 - pci_mmcfg_config_num, GFP_KERNEL); 176 - 177 - if (!res) { 178 - printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n"); 179 - return; 180 - } 181 - 182 - names = (void *)&res[pci_mmcfg_config_num]; 183 - for (i = 0; i < pci_mmcfg_config_num; i++, res++) { 184 - num_buses = pci_mmcfg_config[i].end_bus_number - 185 - pci_mmcfg_config[i].start_bus_number + 1; 186 - res->name = names; 187 - snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", 188 - pci_mmcfg_config[i].pci_segment_group_number); 189 - res->start = pci_mmcfg_config[i].base_address; 190 - res->end = res->start + (num_buses << 20) - 1; 191 - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 192 - insert_resource(&iomem_resource, res); 193 - names += PCI_MMCFG_RESOURCE_NAME_LEN; 194 - } 195 - } 196 - 197 166 void __init pci_mmcfg_init(int type) 198 167 { 199 168 int i; ··· 206 237 } 207 238 208 239 unreachable_devices(); 209 - pci_mmcfg_insert_resources(); 210 240 211 241 raw_pci_ops = &pci_mmcfg; 212 242 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
+1
include/asm-i386/acpi.h
··· 132 132 133 133 #ifdef CONFIG_X86_IO_APIC 134 134 extern int acpi_skip_timer_override; 135 + extern int acpi_use_timer_override; 135 136 #endif 136 137 137 138 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
+1
include/asm-x86_64/acpi.h
··· 163 163 #define ARCH_HAS_POWER_INIT 1 164 164 165 165 extern int acpi_skip_timer_override; 166 + extern int acpi_use_timer_override; 166 167 167 168 #endif /*__KERNEL__*/ 168 169
+9
include/asm-x86_64/pda.h
··· 109 109 #define sub_pda(field,val) pda_to_op("sub",field,val) 110 110 #define or_pda(field,val) pda_to_op("or",field,val) 111 111 112 + /* This is not atomic against other CPUs -- CPU preemption needs to be off */ 113 + #define test_and_clear_bit_pda(bit,field) ({ \ 114 + int old__; \ 115 + asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ 116 + : "=r" (old__), "+m" (_proxy_pda.field) \ 117 + : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ 118 + old__; \ 119 + }) 120 + 112 121 #endif 113 122 114 123 #define PDA_STACKOFFSET (5*8)
-2
include/asm-x86_64/vsyscall.h
··· 59 59 60 60 extern int sysctl_vsyscall; 61 61 62 - extern void vsyscall_set_cpu(int cpu); 63 - 64 62 #define ARCH_HAVE_XTIME_LOCK 1 65 63 66 64 #endif /* __KERNEL__ */