Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Refactor update_mmu_cache_range()

On nohash, this function is a no-op except for E500 with hugepages.

On book3s, this function is for hash MMUs only.

Combine those tests and rename the E500 update_mmu_cache_range()
to __update_mmu_cache(), which gets called by
update_mmu_cache_range().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/b029842cb6783cbeb43d202e69a90341d65295a4.1695659959.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
da9554e0 93f81f6e

+20 -41
-24
arch/powerpc/include/asm/book3s/pgtable.h
··· 8 8 #include <asm/book3s/32/pgtable.h> 9 9 #endif 10 10 11 - #ifndef __ASSEMBLY__ 12 - void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); 13 - 14 - /* 15 - * This gets called at the end of handling a page fault, when 16 - * the kernel has put a new PTE into the page table for the process. 17 - * We use it to ensure coherency between the i-cache and d-cache 18 - * for the page which has just been mapped in. 19 - * On machines which use an MMU hash table, we use this to put a 20 - * corresponding HPTE into the hash table ahead of time, instead of 21 - * waiting for the inevitable extra hash-table miss exception. 22 - */ 23 - static inline void update_mmu_cache_range(struct vm_fault *vmf, 24 - struct vm_area_struct *vma, unsigned long address, 25 - pte_t *ptep, unsigned int nr) 26 - { 27 - if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE)) 28 - return; 29 - if (radix_enabled()) 30 - return; 31 - __update_mmu_cache(vma, address, ptep); 32 - } 33 - 34 - #endif /* __ASSEMBLY__ */ 35 11 #endif
-15
arch/powerpc/include/asm/nohash/pgtable.h
··· 259 259 #define is_hugepd(hpd) (hugepd_ok(hpd)) 260 260 #endif 261 261 262 - /* 263 - * This gets called at the end of handling a page fault, when 264 - * the kernel has put a new PTE into the page table for the process. 265 - * We use it to ensure coherency between the i-cache and d-cache 266 - * for the page which has just been mapped in. 267 - */ 268 - #if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) 269 - void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, 270 - unsigned long address, pte_t *ptep, unsigned int nr); 271 - #else 272 - static inline void update_mmu_cache_range(struct vm_fault *vmf, 273 - struct vm_area_struct *vma, unsigned long address, 274 - pte_t *ptep, unsigned int nr) {} 275 - #endif 276 - 277 262 #endif /* __ASSEMBLY__ */ 278 263 #endif
+19
arch/powerpc/include/asm/pgtable.h
··· 119 119 unsigned long size, pgprot_t vma_prot); 120 120 #define __HAVE_PHYS_MEM_ACCESS_PROT 121 121 122 + void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); 123 + 124 + /* 125 + * This gets called at the end of handling a page fault, when 126 + * the kernel has put a new PTE into the page table for the process. 127 + * We use it to ensure coherency between the i-cache and d-cache 128 + * for the page which has just been mapped in. 129 + * On machines which use an MMU hash table, we use this to put a 130 + * corresponding HPTE into the hash table ahead of time, instead of 131 + * waiting for the inevitable extra hash-table miss exception. 132 + */ 133 + static inline void update_mmu_cache_range(struct vm_fault *vmf, 134 + struct vm_area_struct *vma, unsigned long address, 135 + pte_t *ptep, unsigned int nr) 136 + { 137 + if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) || 138 + (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE))) 139 + __update_mmu_cache(vma, address, ptep); 140 + } 122 141 123 142 /* 124 143 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
+1 -2
arch/powerpc/mm/nohash/e500_hugetlbpage.c
··· 178 178 * 179 179 * This must always be called with the pte lock held. 180 180 */ 181 - void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, 182 - unsigned long address, pte_t *ptep, unsigned int nr) 181 + void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 183 182 { 184 183 if (is_vm_hugetlb_page(vma)) 185 184 book3e_hugetlb_preload(vma, address, *ptep);