Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"Misc fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Flush lazy MMU when DEBUG_PAGEALLOC is set
x86/mm/cpa/selftest: Fix false positive in CPA self test
x86/mm/cpa: Convert noop to functional fix
x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal
x86, mm, paravirt: Fix vmalloc_fault oops during lazy MMU updates

Changed files
+33 -21
+4 -1
arch/x86/include/asm/paravirt.h
···
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				phys_addr_t phys, pgprot_t flags)
+2
arch/x86/include/asm/paravirt_types.h
···
 	/* Set deferred update mode, used for batching operations. */
 	void (*enter)(void);
 	void (*leave)(void);
+	void (*flush)(void);
 };
 
 struct pv_time_ops {
···
 
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);
+13 -12
arch/x86/kernel/paravirt.c
···
 	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
+void paravirt_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
···
 		return PARAVIRT_LAZY_NONE;
 
 	return this_cpu_read(paravirt_lazy_mode);
-}
-
-void arch_flush_lazy_mmu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
 }
 
 struct pv_info pv_info = {
···
 	.lazy_mode = {
 		.enter = paravirt_nop,
 		.leave = paravirt_nop,
+		.flush = paravirt_nop,
 	},
 
 	.set_fixmap = native_set_fixmap,
+1
arch/x86/lguest/boot.c
···
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
 	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 	pv_mmu_ops.pte_update = lguest_pte_update;
 	pv_mmu_ops.pte_update_defer = lguest_pte_update;
+4 -2
arch/x86/mm/fault.c
···
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd))
+	if (pgd_none(*pgd)) {
 		set_pgd(pgd, *pgd_ref);
-	else
+		arch_flush_lazy_mmu_mode();
+	} else {
 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	}
 
 	/*
 	 * Below here mismatches are bugs because these lower tables
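Why the flush matters in vmalloc_fault(), as a minimal sketch (the caller below is illustrative, not kernel code; the identifiers are the real lazy-MMU entry points): a vmalloc fault can be taken while the CPU is inside a lazy MMU batch, and under a paravirt backend the set_pgd() above may then only be queued, so the handler has to flush the queue before the faulting access is retried.

/*
 * Illustrative caller only. Under a paravirt backend (e.g. Xen), page-table
 * writes between enter/leave may be queued rather than applied immediately.
 */
static void lazy_batch_example(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t pte)
{
	arch_enter_lazy_mmu_mode();		/* start batching PTE updates */
	set_pte_at(mm, addr, ptep, pte);	/* may sit in the backend's queue */
	/*
	 * A stray access to a vmalloc address here faults into
	 * vmalloc_fault() with the batch still open; without the new
	 * arch_flush_lazy_mmu_mode() call its set_pgd() could stay queued,
	 * the access would keep faulting and the kernel would oops.
	 */
	arch_leave_lazy_mmu_mode();		/* apply whatever is still queued */
}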
+1 -1
arch/x86/mm/pageattr-test.c
···
 			s->gpg++;
 			i += GPS/PAGE_SIZE;
 		} else if (level == PG_LEVEL_2M) {
-			if (!(pte_val(*pte) & _PAGE_PSE)) {
+			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
 				printk(KERN_ERR
 					"%lx level %d but not PSE %Lx\n",
 					addr, level, (u64)pte_val(*pte));
+7 -5
arch/x86/mm/pageattr.c
···
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pte_pgprot(old_pte);
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
···
 	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
 	 * for the ancient hardware that doesn't support it.
 	 */
-	if (pgprot_val(new_prot) & _PAGE_PRESENT)
-		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+	if (pgprot_val(req_prot) & _PAGE_PRESENT)
+		pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
 	else
-		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+		pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 
-	new_prot = canon_pgprot(new_prot);
+	req_prot = canon_pgprot(req_prot);
 
 	/*
 	 * old_pte points to the large page base address. So we need
···
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
+
+	arch_flush_lazy_mmu_mode();
 }
 
 #ifdef CONFIG_HIBERNATION
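The DEBUG_PAGEALLOC side of the last hunk, again as a sketch (the wrapper below is illustrative; kernel_map_pages() is the real hook the page allocator calls): with CONFIG_DEBUG_PAGEALLOC the allocator unmaps pages as they are freed and maps them back on allocation, and those mapping changes must take effect immediately, which is why pending lazy MMU updates are now flushed right after the TLB flush.

/*
 * Illustrative only: how the page allocator reaches kernel_map_pages(),
 * the function patched in the hunk above.
 */
static void debug_pagealloc_example(struct page *page)
{
	kernel_map_pages(page, 1, 0);	/* on free: unmap, so stray touches fault */
	/* ... the page sits unmapped while on the free lists ... */
	kernel_map_pages(page, 1, 1);	/* on alloc: map it back in */
}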
+1
arch/x86/xen/mmu.c
···
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
+		.flush = paravirt_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,