Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove ptep_establish()

The last user of ptep_establish in mm/ is long gone. Remove the architecture
primitive as well.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Martin Schwidefsky and committed by Linus Torvalds
f0e47c22 5ee403f5

+26 -59
+3 -3
include/asm-arm/pgtable.h
··· 83 83 * means that a write to a clean page will cause a permission fault, and 84 84 * the Linux MM layer will mark the page dirty via handle_pte_fault(). 85 85 * For the hardware to notice the permission change, the TLB entry must 86 - * be flushed, and ptep_establish() does that for us. 86 + * be flushed, and ptep_set_access_flags() does that for us. 87 87 * 88 88 * The "accessed" or "young" bit is emulated by a similar method; we only 89 89 * allow accesses to the page if the "young" bit is set. Accesses to the 90 90 * page will cause a fault, and handle_pte_fault() will set the young bit 91 91 * for us as long as the page is marked present in the corresponding Linux 92 - * PTE entry. Again, ptep_establish() will ensure that the TLB is up to 93 - * date. 92 + * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is 93 + * up to date. 94 94 * 95 95 * However, when the "young" bit is cleared, we deny access to the page 96 96 * by clearing the hardware PTE. Currently Linux does not flush the TLB
-19
include/asm-generic/pgtable.h
··· 3 3 4 4 #ifndef __ASSEMBLY__ 5 5 6 - #ifndef __HAVE_ARCH_PTEP_ESTABLISH 7 - /* 8 - * Establish a new mapping: 9 - * - flush the old one 10 - * - update the page tables 11 - * - inform the TLB about the new one 12 - * 13 - * We hold the mm semaphore for reading, and the pte lock. 14 - * 15 - * Note: the old pte is known to not be writable, so we don't need to 16 - * worry about dirty bits etc getting lost. 17 - */ 18 - #define ptep_establish(__vma, __address, __ptep, __entry) \ 19 - do { \ 20 - set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ 21 - flush_tlb_page(__vma, __address); \ 22 - } while (0) 23 - #endif 24 - 25 6 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 26 7 /* 27 8 * Largely same as above, but only sets the access flags (dirty,
-11
include/asm-i386/pgtable.h
··· 311 311 __ret; \ 312 312 }) 313 313 314 - /* 315 - * Rules for using ptep_establish: the pte MUST be a user pte, and 316 - * must be a present->present transition. 317 - */ 318 - #define __HAVE_ARCH_PTEP_ESTABLISH 319 - #define ptep_establish(vma, address, ptep, pteval) \ 320 - do { \ 321 - set_pte_present((vma)->vm_mm, address, ptep, pteval); \ 322 - flush_tlb_page(vma, address); \ 323 - } while (0) 324 - 325 314 #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH 326 315 #define ptep_clear_flush_dirty(vma, address, ptep) \ 327 316 ({ \
+4 -2
include/asm-ia64/pgtable.h
··· 543 543 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \ 544 544 ({ \ 545 545 int __changed = !pte_same(*(__ptep), __entry); \ 546 - if (__changed) \ 547 - ptep_establish(__vma, __addr, __ptep, __entry); \ 546 + if (__changed) { \ 547 + set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \ 548 + flush_tlb_page(__vma, __addr); \ 549 + } \ 548 550 __changed; \ 549 551 }) 550 552 #endif
+19 -24
include/asm-s390/pgtable.h
··· 707 707 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 708 708 } 709 709 710 - static inline pte_t 711 - ptep_clear_flush(struct vm_area_struct *vma, 712 - unsigned long address, pte_t *ptep) 710 + static inline void ptep_invalidate(unsigned long address, pte_t *ptep) 711 + { 712 + __ptep_ipte(address, ptep); 713 + ptep = get_shadow_pte(ptep); 714 + if (ptep) 715 + __ptep_ipte(address, ptep); 716 + } 717 + 718 + static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 719 + unsigned long address, pte_t *ptep) 713 720 { 714 721 pte_t pte = *ptep; 715 - pte_t *shadow_pte = get_shadow_pte(ptep); 716 - 717 - __ptep_ipte(address, ptep); 718 - if (shadow_pte) 719 - __ptep_ipte(address, shadow_pte); 722 + ptep_invalidate(address, ptep); 720 723 return pte; 721 724 } 722 725 ··· 729 726 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 730 727 } 731 728 732 - static inline void 733 - ptep_establish(struct vm_area_struct *vma, 734 - unsigned long address, pte_t *ptep, 735 - pte_t entry) 736 - { 737 - ptep_clear_flush(vma, address, ptep); 738 - set_pte(ptep, entry); 739 - } 740 - 741 - #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 742 - ({ \ 743 - int __changed = !pte_same(*(__ptep), __entry); \ 744 - if (__changed) \ 745 - ptep_establish(__vma, __address, __ptep, __entry); \ 746 - __changed; \ 729 + #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 730 + ({ \ 731 + int __changed = !pte_same(*(__ptep), __entry); \ 732 + if (__changed) { \ 733 + ptep_invalidate(__addr, __ptep); \ 734 + set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ 735 + } \ 736 + __changed; \ 747 737 }) 748 738 749 739 /* ··· 936 940 #define __HAVE_ARCH_MEMMAP_INIT 937 941 extern void memmap_init(unsigned long, int, unsigned long, unsigned long); 938 942 939 - #define __HAVE_ARCH_PTEP_ESTABLISH 940 943 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 941 944 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 942 945 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH