Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  resource: Fix broken indentation
  resource: Fix generic page_is_ram() for partial RAM pages
  x86, paravirt: Remove kmap_atomic_pte paravirt op.
  x86, vmi: Disable highmem PTE allocation even when CONFIG_HIGHPTE=y
  x86, xen: Disable highmem PTE allocation even when CONFIG_HIGHPTE=y

+21 -76
-4
arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
-#endif
-
 #define flush_cache_kmaps()	do { } while (0)
 
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-9
arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	unsigned long ret;
-	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
-	return (void *)ret;
-}
-#endif
-
 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
-4
arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@
 #endif	/* PAGETABLE_LEVELS == 4 */
 #endif	/* PAGETABLE_LEVELS >= 3 */
 
-#ifdef CONFIG_HIGHPTE
-	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
 	struct pv_lazy_ops lazy_mode;
 
 	/* dom0 ops */
+2 -2
arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@
 	 in_irq() ? KM_IRQ_PTE :	\
 	 KM_PTE0)
 #define pte_offset_map(dir, address)					\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +		\
	 pte_index((address)))
 #define pte_offset_map_nested(dir, address)				\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
 #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
 #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
-4
arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = kmap_atomic,
-#endif
-
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
+7 -28
arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
@@ -266,30 +265,6 @@
 static void vmi_nop(void)
 {
 }
-
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	void *va = kmap_atomic(page, type);
-
-	/*
-	 * Internally, the VMI ROM must map virtual addresses to physical
-	 * addresses for processing MMU updates.  By the time MMU updates
-	 * are issued, this information is typically already lost.
-	 * Fortunately, the VMI provides a cache of mapping slots for active
-	 * page tables.
-	 *
-	 * We use slot zero for the linear mapping of physical memory, and
-	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
-	 *
-	 *  args:                 SLOT                 VA    COUNT PFN
-	 */
-	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
-	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
-
-	return va;
-}
-#endif
 
 static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
 {
@@ -617,6 +640,12 @@
 	u64 reloc;
 	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
 
+	/*
+	 * Prevent page tables from being allocated in highmem, even if
+	 * CONFIG_HIGHPTE is enabled.
+	 */
+	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
 	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
 		printk(KERN_ERR "VMI ROM failed to initialize!");
 		return 0;
@@ -761,10 +778,6 @@
 
 	/* Set linear is needed in all cases */
 	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
-	if (vmi_ops.set_linear_mapping)
-		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
 
 	/*
	 * These MUST always be patched.  Don't support indirect jumps
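Note on the replacement mechanism: rather than remapping highmem PTE pages through a paravirt hook, both hypervisor backends now mask __GFP_HIGHMEM out of __userpte_alloc_gfp so that user PTE pages are allocated in lowmem in the first place. A minimal sketch of how that mask is consumed, paraphrased from arch/x86/mm/pgtable.c of this era (not part of this merge):

    #define PGALLOC_GFP	GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

    #ifdef CONFIG_HIGHPTE
    #define PGALLOC_USER_GFP	__GFP_HIGHMEM
    #else
    #define PGALLOC_USER_GFP	0
    #endif

    gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

    pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
    {
    	struct page *pte;

    	/* With __GFP_HIGHMEM cleared, this can only return a lowmem page. */
    	pte = alloc_pages(__userpte_alloc_gfp, 0);
    	if (pte)
    		pgtable_page_ctor(pte);
    	return pte;
    }

Since a lowmem PTE page is always reachable through the linear mapping, the plain kmap_atomic() in the pgtable_32.h hunk above is sufficient, and the per-hypervisor kmap_atomic_pte hooks can be removed.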
+7
arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
@@ -1094,6 +1093,12 @@
 	__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
 
 	__supported_pte_mask |= _PAGE_IOMAP;
+
+	/*
+	 * Prevent page tables from being allocated in highmem, even
+	 * if CONFIG_HIGHPTE is enabled.
+	 */
+	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
 	/* Work out if we support NX */
 	x86_configure_nx();
-21
arch/x86/xen/mmu.c
@@ -1427,23 +1427,6 @@
 #endif
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	pgprot_t prot = PAGE_KERNEL;
-
-	if (PagePinned(page))
-		prot = PAGE_KERNEL_RO;
-
-	if (0 && PageHighMem(page))
-		printk("mapping highpte %lx type %d prot %s\n",
-		       page_to_pfn(page), type,
-		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-	return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
 #ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
@@ -1884,10 +1901,6 @@
 	.alloc_pmd = xen_alloc_pmd_init,
 	.alloc_pmd_clone = paravirt_nop,
 	.release_pmd = xen_release_pmd_init,
-
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
 
 #ifdef CONFIG_X86_64
 	.set_pte = xen_set_pte,
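Why dropping xen_kmap_atomic_pte is safe: the hook existed to map pinned page-table pages with PAGE_KERNEL_RO. Once PTE pages are guaranteed to live in lowmem, kmap_atomic() takes its lowmem fast path and simply returns the existing linear-map address, which Xen has already write-protected for pinned page tables. A sketch of that fast path, paraphrased from arch/x86/mm/highmem_32.c of this era:

    void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
    {
    	...
    	pagefault_disable();

    	/* Lowmem pages are returned straight from the linear mapping. */
    	if (!PageHighMem(page))
    		return page_address(page);
    	...
    }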
+5 -4
kernel/resource.c
@@ -304,7 +304,7 @@
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
 	struct resource res;
-	unsigned long pfn, len;
+	unsigned long pfn, end_pfn;
 	u64 orig_end;
 	int ret = -1;
 
@@ -314,9 +314,10 @@
 	orig_end = res.end;
 	while ((res.start < res.end) &&
 		(find_next_system_ram(&res, "System RAM") >= 0)) {
-		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
-		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		end_pfn = (res.end + 1) >> PAGE_SHIFT;
+		if (end_pfn > pfn)
+			ret = (*func)(pfn, end_pfn - pfn, arg);
 		if (ret)
 			break;
 		res.start = res.end + 1;
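A worked example of the rounding fix (hypothetical resource values, assuming PAGE_SIZE = 4096, PAGE_SHIFT = 12):

    System RAM resource:  res.start = 0xa200, res.end = 0xcdff

    Old code:  pfn = 0xa200 >> 12                 = 0xa
               len = (0xcdff + 1 - 0xa200) >> 12  = 0x2
               -> pages 0xa and 0xb are walked, even though page 0xa
                  is only partially RAM (RAM starts at 0xa200).

    New code:  pfn     = (0xa200 + 4096 - 1) >> 12 = 0xb
               end_pfn = (0xcdff + 1) >> 12        = 0xc
               -> only page 0xb, the single fully contained page, is
                  walked; the partial pages at both ends are skipped.

Rounding the start pfn up and the end pfn down means page_is_ram() now reports a page as RAM only when the whole page is covered by a System RAM resource, and the new end_pfn > pfn guard keeps the callback from being invoked with a zero or negative length when a resource covers no complete page.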