Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/paravirt: Remove paravirt ops pmd_update[_defer] and pte_update_defer

pte_update_defer can be removed as it is always set to the same
function as pte_update. So any usage of pte_update_defer() can be
replaced by pte_update().

pmd_update and pmd_update_defer are always set to paravirt_nop, so they
can just be nuked.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: jeremy@goop.org
Cc: chrisw@sous-sol.org
Cc: akataria@vmware.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xen.org
Cc: konrad.wilk@oracle.com
Cc: david.vrabel@citrix.com
Cc: boris.ostrovsky@oracle.com
Link: http://lkml.kernel.org/r/1447771879-1806-1-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Juergen Gross and committed by Thomas Gleixner
d6ccc3ec 46095865

+3 -47
-17
arch/x86/include/asm/paravirt.h
··· 366 366 { 367 367 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); 368 368 } 369 - static inline void pmd_update(struct mm_struct *mm, unsigned long addr, 370 - pmd_t *pmdp) 371 - { 372 - PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp); 373 - } 374 - 375 - static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, 376 - pte_t *ptep) 377 - { 378 - PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); 379 - } 380 - 381 - static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr, 382 - pmd_t *pmdp) 383 - { 384 - PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp); 385 - } 386 369 387 370 static inline pte_t __pte(pteval_t val) 388 371 {
-6
arch/x86/include/asm/paravirt_types.h
··· 266 266 pmd_t *pmdp, pmd_t pmdval); 267 267 void (*pte_update)(struct mm_struct *mm, unsigned long addr, 268 268 pte_t *ptep); 269 - void (*pte_update_defer)(struct mm_struct *mm, 270 - unsigned long addr, pte_t *ptep); 271 - void (*pmd_update)(struct mm_struct *mm, unsigned long addr, 272 - pmd_t *pmdp); 273 - void (*pmd_update_defer)(struct mm_struct *mm, 274 - unsigned long addr, pmd_t *pmdp); 275 269 276 270 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, 277 271 pte_t *ptep);
+2 -13
arch/x86/include/asm/pgtable.h
··· 69 69 #define pmd_clear(pmd) native_pmd_clear(pmd) 70 70 71 71 #define pte_update(mm, addr, ptep) do { } while (0) 72 - #define pte_update_defer(mm, addr, ptep) do { } while (0) 73 - #define pmd_update(mm, addr, ptep) do { } while (0) 74 - #define pmd_update_defer(mm, addr, ptep) do { } while (0) 75 72 76 73 #define pgd_val(x) native_pgd_val(x) 77 74 #define __pgd(x) native_make_pgd(x) ··· 718 721 * updates should either be sets, clears, or set_pte_atomic for P->P 719 722 * transitions, which means this hook should only be called for user PTEs. 720 723 * This hook implies a P->P protection or access change has taken place, which 721 - * requires a subsequent TLB flush. The notification can optionally be delayed 722 - * until the TLB flush event by using the pte_update_defer form of the 723 - * interface, but care must be taken to assure that the flush happens while 724 - * still holding the same page table lock so that the shadow and primary pages 725 - * do not become out of sync on SMP. 724 + * requires a subsequent TLB flush. 726 725 */ 727 726 #define pte_update(mm, addr, ptep) do { } while (0) 728 - #define pte_update_defer(mm, addr, ptep) do { } while (0) 729 727 #endif 730 728 731 729 /* ··· 812 820 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, 813 821 pmd_t *pmdp) 814 822 { 815 - pmd_t pmd = native_pmdp_get_and_clear(pmdp); 816 - pmd_update(mm, addr, pmdp); 817 - return pmd; 823 + return native_pmdp_get_and_clear(pmdp); 818 824 } 819 825 820 826 #define __HAVE_ARCH_PMDP_SET_WRPROTECT ··· 820 830 unsigned long addr, pmd_t *pmdp) 821 831 { 822 832 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); 823 - pmd_update(mm, addr, pmdp); 824 833 } 825 834 826 835 /*
-3
arch/x86/kernel/paravirt.c
··· 426 426 .set_pmd = native_set_pmd, 427 427 .set_pmd_at = native_set_pmd_at, 428 428 .pte_update = paravirt_nop, 429 - .pte_update_defer = paravirt_nop, 430 - .pmd_update = paravirt_nop, 431 - .pmd_update_defer = paravirt_nop, 432 429 433 430 .ptep_modify_prot_start = __ptep_modify_prot_start, 434 431 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
-1
arch/x86/lguest/boot.c
··· 1472 1472 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; 1473 1473 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; 1474 1474 pv_mmu_ops.pte_update = lguest_pte_update; 1475 - pv_mmu_ops.pte_update_defer = lguest_pte_update; 1476 1475 1477 1476 #ifdef CONFIG_X86_LOCAL_APIC 1478 1477 /* APIC read/write intercepts */
+1 -6
arch/x86/mm/pgtable.c
··· 414 414 415 415 if (changed && dirty) { 416 416 *ptep = entry; 417 - pte_update_defer(vma->vm_mm, address, ptep); 417 + pte_update(vma->vm_mm, address, ptep); 418 418 } 419 419 420 420 return changed; ··· 431 431 432 432 if (changed && dirty) { 433 433 *pmdp = entry; 434 - pmd_update_defer(vma->vm_mm, address, pmdp); 435 434 /* 436 435 * We had a write-protection fault here and changed the pmd 437 436 * to to more permissive. No need to flush the TLB for that, ··· 467 468 if (pmd_young(*pmdp)) 468 469 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 469 470 (unsigned long *)pmdp); 470 - 471 - if (ret) 472 - pmd_update(vma->vm_mm, addr, pmdp); 473 471 474 472 return ret; 475 473 } ··· 514 518 set = !test_and_set_bit(_PAGE_BIT_SPLITTING, 515 519 (unsigned long *)pmdp); 516 520 if (set) { 517 - pmd_update(vma->vm_mm, address, pmdp); 518 521 /* need tlb flush only to serialize against gup-fast */ 519 522 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 520 523 }
-1
arch/x86/xen/mmu.c
··· 2436 2436 .flush_tlb_others = xen_flush_tlb_others, 2437 2437 2438 2438 .pte_update = paravirt_nop, 2439 - .pte_update_defer = paravirt_nop, 2440 2439 2441 2440 .pgd_alloc = xen_pgd_alloc, 2442 2441 .pgd_free = xen_pgd_free,