Merge branch 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: cpu_index build fix
x86/voyager: fix missing cpu_index initialisation
x86/voyager: fix compile breakage caused by dc1e35c6e95e8923cf1d3510438b63c600fee1e2
x86: fix /dev/mem mmap breakage when PAT is disabled
x86/voyager: fix compile breakage caused by x86: move prefill_possible_map calling early
x86: use CONFIG_X86_SMP instead of CONFIG_SMP
x86/voyager: fix boot breakage caused by x86: boot secondary cpus through initial_code
x86, uv: fix compile error in uv_hub.h
i386/PAE: fix pud_page()
x86: remove debug code from arch_add_memory()
x86: start annotating early ioremap pointers with __iomem
x86: two trivial sparse annotations
x86: fix init_memory_mapping for [dc000000 - e0000000) - v2

+85 -40
+4
arch/x86/Kconfig
···
 	  If you don't know what to do here, say N.

+config X86_HAS_BOOT_CPU_ID
+	def_bool y
+	depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
+3 -3
arch/x86/include/asm/io.h
···
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+2 -2
arch/x86/include/asm/pgtable-3level.h
···
 	write_cr3(pgd);
 }

-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))

 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))

 #ifdef CONFIG_SMP
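For context on the pud_page() fix, here is a simplified model of the two accessors' contracts (names mirror the kernel's; the backing arrays and sizes below are hypothetical, not from the patch): pud_page() must yield the struct page describing the pmd table, so generic mm code can do memmap accounting, while pud_page_vaddr() yields the virtual address at which the table can be dereferenced. The old macro returned the latter cast to the former, which only worked because pmd_offset() immediately used it as an address again.

/* Standalone userspace model of the two accessors' contracts.
 * Names mirror the kernel; everything else here is simplified. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PTE_PFN_MASK	0x0000fffffffff000ULL

struct page { int flags; };			/* stand-in for struct page */
static struct page mem_map[16];			/* memmap: one entry per pfn */
static char phys_mem[16][1 << PAGE_SHIFT];	/* identity-"mapped" RAM */

#define pfn_to_page(pfn)	(&mem_map[(pfn)])
#define __va(phys)		((void *)((char *)phys_mem + (phys)))

typedef struct { uint64_t pud; } pud_t;
#define pud_val(x)		((x).pud)

/* fixed: a real struct page, usable by generic mm code */
#define pud_page(pud)	    pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* a dereferenceable virtual address, which is what pmd_offset() needs */
#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & PTE_PFN_MASK))

int main(void)
{
	pud_t pud = { .pud = 5ULL << PAGE_SHIFT };

	assert(pud_page(pud) == &mem_map[5]);			 /* struct page */
	assert(pud_page_vaddr(pud) == (unsigned long)phys_mem[5]); /* address */
	return 0;
}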
+6
arch/x86/include/asm/smp.h
···
 #endif /* CONFIG_X86_LOCAL_APIC */

+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id 0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
+1
arch/x86/include/asm/uv/uv_hub.h
···
 #include <linux/numa.h>
 #include <linux/percpu.h>
+#include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
+1 -1
arch/x86/kernel/cpu/addon_cpuid_features.c
···
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+5 -1
arch/x86/kernel/cpu/common.c
···
 		this_cpu->c_early_init(c);

 	validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+	c->cpu_index = boot_cpu_id;
+#endif
 }

 void __init early_cpu_init(void)
···
 	/*
 	 * Boot processor to setup the FP and extended state context info.
 	 */
-	if (!smp_processor_id())
+	if (smp_processor_id() == boot_cpu_id)
 		init_thread_xstate();

 	xsave_init();
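A hedged aside on the `!smp_processor_id()` change (the helper below is illustrative, not part of the patch): the old test assumed the boot processor is always CPU 0, which is false on Voyager, where the bootstrap processor can have a nonzero ID. Comparing against boot_cpu_id is correct everywhere and, thanks to the smp.h fallback above, still folds to `== 0` at compile time on subarchitectures without CONFIG_X86_HAS_BOOT_CPU_ID.

/* Illustrative only: how the boot-CPU test should read.
 * boot_cpu_id resolves to the Voyager variable or to the constant 0. */
static inline int is_boot_cpu(int cpu)
{
	return cpu == boot_cpu_id;	/* right on every subarch */
	/* "return !cpu;" would misidentify the boot CPU on a Voyager
	 * box whose bootstrap processor is, say, CPU 8 */
}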
+1 -1
arch/x86/kernel/tsc.c
···
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;

-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	if (apic_is_clustered_box())
 		return 1;
 #endif
+1 -1
arch/x86/kernel/vsmp_64.c
···
 static void __init set_vsmp_pv_ops(void)
 {
-	void *address;
+	void __iomem *address;
 	unsigned int cap, ctl, cfg;

 	/* set vSMP magic bits to indicate vSMP capable kernel */
+11 -1
arch/x86/mach-voyager/voyager_smp.c
···
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
+static void initialize_secondary(void);

 int hard_smp_processor_id(void);
 int safe_smp_processor_id(void);
···
 	}
 }

+void prefill_possible_map(void)
+{
+	/* This is empty on voyager because we need a much
+	 * earlier detection which is done in find_smp_config */
+}
+
 /* Set up all the basic stuff: read the SMP config and make all the
  * SMP information reflect only the boot cpu.  All others will be
  * brought on-line later. */
···
 	struct cpuinfo_x86 *c = &cpu_data(id);

 	*c = boot_cpu_data;
+	c->cpu_index = id;

 	identify_secondary_cpu(c);
 }
···
 	   smp_tune_scheduling();
 	 */
 	smp_store_cpu_info(boot_cpu_id);
+	/* setup the jump vector */
+	initial_code = (unsigned long)initialize_secondary;
 	printk("CPU%d: ", boot_cpu_id);
 	print_cpu_info(&cpu_data(boot_cpu_id));
···
 /* Reload the secondary CPUs task structure (this function does not
  * return ) */
-void __init initialize_secondary(void)
+static void __init initialize_secondary(void)
 {
 #if 0
 	// AC kernels only
+1 -1
arch/x86/mm/gup.c
···
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					start, len)))
+					(void __user *)start, len)))
 		goto slow_irqon;

 	/*
+34 -18
arch/x86/mm/init_64.c
···
 	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pos;

 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
 	int use_pse, use_gbpages;

-	printk(KERN_INFO "init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

 	/*
 	 * Find space for the kernel direct mapping tables.
···
 	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+	pos = start_pfn << PAGE_SHIFT;
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* big page (2M) range*/
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
 			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* big page (1G) range */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* tail is not big page (1G) alignment */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* tail is not big page (2M) alignment */
-	start_pfn = end_pfn;
+	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
···
 	max_pfn_mapped = last_mapped_pfn;

 	ret = __add_pages(zone, start_pfn, nr_pages);
-	WARN_ON(1);
+	WARN_ON_ONCE(ret);

 	return ret;
 }
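The [dc000000, e0000000) failure is easy to reproduce with the same shift arithmetic in a standalone program (a sketch; only the head and 2M steps are shown, the driver code is made up, and the shifts assume x86-64 page sizes): the old code clamped the 2M range's end against end's 1G boundary (0xc0000000), producing end_pfn < start_pfn and handing that inverted range to save_mr() anyway; tracking pos and guarding each step with start_pfn < end_pfn keeps every range well-formed.

/* Standalone model of the new range splitting for start=0xdc000000,
 * end=0xe0000000; helpers and printing are illustrative. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21			/* 2M pages */
#define PUD_SHIFT  30			/* 1G pages */
#define PMD_SIZE   (1ULL << PMD_SHIFT)
#define PUD_SIZE   (1ULL << PUD_SHIFT)

int main(void)
{
	unsigned long long start = 0xdc000000ULL, end = 0xe0000000ULL;
	unsigned long long pos, start_pfn, end_pfn;

	/* head: 4K pages up to the first 2M boundary (empty here,
	 * since 0xdc000000 is already 2M-aligned) */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		printf("4K head:  pfn %#llx-%#llx\n", start_pfn, end_pfn);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* 2M range: the v2 fix clamps against end's 2M boundary; the old
	 * code clamped against the 1G boundary, yielding end_pfn 0xc0000
	 * below start_pfn 0xdc000 with no guard in sight */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT));
	if (start_pfn < end_pfn)
		printf("2M range: pfn %#llx-%#llx\n", start_pfn, end_pfn);

	/* the 1G, 2M-tail and 4K-tail steps follow the same pattern and
	 * all come out empty for this window */
	return 0;
}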
+11 -11
arch/x86/mm/ioremap.c
···
 				unsigned long size)
 {
 	unsigned long flags;
-	void *ret;
+	void __iomem *ret;
 	int err;

 	/*
···
 	if (err < 0)
 		return NULL;

-	ret = (void *) __ioremap_caller(phys_addr, size, flags,
-				__builtin_return_address(0));
+	ret = __ioremap_caller(phys_addr, size, flags,
+			       __builtin_return_address(0));

 	free_memtype(phys_addr, phys_addr + size);
-	return (void __iomem *)ret;
+	return ret;
 }

 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
···
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }

-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
···
 }
 late_initcall(check_early_ioremap_leak);

-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;
···
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

-	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
 	return prev_map[slot];
 }

 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }

 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }

-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
 	unsigned long virt_addr;
 	unsigned long offset;
···
 		--idx;
 		--nrpages;
 	}
-	prev_map[slot] = 0;
+	prev_map[slot] = NULL;
 }

 void __this_fixmap_does_not_exist(void)
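These hunks change no behaviour; they teach sparse about the pointers. A hedged usage fragment (kernel context, assuming <asm/io.h>; the function, physical address, and size below are all made up for illustration) showing what the annotations buy: under "make C=1", mixing a plain void * with these interfaces now produces an address-space warning.

/* Illustrative fragment, not from the patch; 0xfee00000 is made up. */
static void __init peek_early_mmio(void)
{
	void __iomem *regs = early_ioremap(0xfee00000UL, PAGE_SIZE);

	if (regs) {
		/* readl() takes an __iomem pointer, so this is clean */
		printk(KERN_INFO "reg0: %08x\n", readl(regs));
		early_iounmap(regs, PAGE_SIZE);
	}
	/* void *p = early_ioremap(...);  <-- sparse would now warn:
	 * incorrect type in initializer (different address spaces) */
}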
+4
arch/x86/mm/pat.c
···
 	return 1;
 }
 #else
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
 	u64 to = from + size;
 	u64 cursor = from;
+
+	if (!pat_enabled)
+		return 1;

 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
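The regression this addresses: with CONFIG_STRICT_DEVMEM=n and PAT compiled in but disabled at runtime (nopat, or unsupported hardware), this range_is_allowed() variant still vetoed /dev/mem mmaps of RAM pages, even though the check only makes sense as a cache-aliasing precaution while PAT is active. A userspace sketch of a mapping that could fail on such kernels (the offset is illustrative; assumes root and CONFIG_STRICT_DEVMEM=n):

/* Sketch: mmap a RAM page above 1MB through /dev/mem. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	/* before the fix, a PAT-disabled boot made this mmap fail even
	 * though STRICT_DEVMEM was off */
	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x2000000);
	if (p == MAP_FAILED)
		perror("mmap");
	else
		munmap(p, 4096);

	close(fd);
	return 0;
}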