Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture
will need to have the virtual address corresponding to PTE page when
freeing it, due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort-of, half of it actually) represented by a PTE
page, and which contain an "indirect" bit indicating that this TLB entry
RPN points to an array of PTEs from which the TLB can then create direct
entries. Thus, in order to invalidate those when PTE pages are deleted,
we need the virtual address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks
too much, the address is almost readily available in all call sites and
almost everybody implements these as macros, so we may as well add the
argument everywhere. I added it to the pmd and pud variants for consistency.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: David Howells <dhowells@redhat.com> [MN10300 & FRV]
Acked-by: Nick Piggin <npiggin@suse.de>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> [s390]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Benjamin Herrenschmidt and committed by
Linus Torvalds
9e1b32ca 4be3bd78

+107 -82
+2 -2
arch/alpha/include/asm/tlb.h
··· 9 9 10 10 #include <asm-generic/tlb.h> 11 11 12 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 13 - #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 12 + #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 13 + #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) 14 14 15 15 #endif
+2 -2
arch/arm/include/asm/tlb.h
··· 102 102 } 103 103 104 104 #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 105 - #define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) 106 - #define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) 105 + #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) 106 + #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 107 107 108 108 #define tlb_migrate_finish(mm) do { } while (0) 109 109
+1 -1
arch/avr32/include/asm/pgalloc.h
··· 83 83 quicklist_free_page(QUICK_PT, NULL, pte); 84 84 } 85 85 86 - #define __pte_free_tlb(tlb,pte) \ 86 + #define __pte_free_tlb(tlb,pte,addr) \ 87 87 do { \ 88 88 pgtable_page_dtor(pte); \ 89 89 tlb_remove_page((tlb), pte); \
+1 -1
arch/cris/include/asm/pgalloc.h
··· 47 47 __free_page(pte); 48 48 } 49 49 50 - #define __pte_free_tlb(tlb,pte) \ 50 + #define __pte_free_tlb(tlb,pte,address) \ 51 51 do { \ 52 52 pgtable_page_dtor(pte); \ 53 53 tlb_remove_page((tlb), pte); \
+2 -2
arch/frv/include/asm/pgalloc.h
··· 49 49 __free_page(pte); 50 50 } 51 51 52 - #define __pte_free_tlb(tlb,pte) \ 52 + #define __pte_free_tlb(tlb,pte,address) \ 53 53 do { \ 54 54 pgtable_page_dtor(pte); \ 55 55 tlb_remove_page((tlb),(pte)); \ ··· 62 62 */ 63 63 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); }) 64 64 #define pmd_free(mm, x) do { } while (0) 65 - #define __pmd_free_tlb(tlb,x) do { } while (0) 65 + #define __pmd_free_tlb(tlb,x,a) do { } while (0) 66 66 67 67 #endif /* CONFIG_MMU */ 68 68
+1 -1
arch/frv/include/asm/pgtable.h
··· 225 225 */ 226 226 #define pud_alloc_one(mm, address) NULL 227 227 #define pud_free(mm, x) do { } while (0) 228 - #define __pud_free_tlb(tlb, x) do { } while (0) 228 + #define __pud_free_tlb(tlb, x, address) do { } while (0) 229 229 230 230 /* 231 231 * The "pud_xxx()" functions here are trivial for a folded two-level
+3 -3
arch/ia64/include/asm/pgalloc.h
··· 48 48 { 49 49 quicklist_free(0, NULL, pud); 50 50 } 51 - #define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud) 51 + #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) 52 52 #endif /* CONFIG_PGTABLE_4 */ 53 53 54 54 static inline void ··· 67 67 quicklist_free(0, NULL, pmd); 68 68 } 69 69 70 - #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 70 + #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) 71 71 72 72 static inline void 73 73 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte) ··· 117 117 quicklist_trim(0, NULL, 25, 16); 118 118 } 119 119 120 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 120 + #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 121 121 122 122 #endif /* _ASM_IA64_PGALLOC_H */
+6 -6
arch/ia64/include/asm/tlb.h
··· 236 236 __tlb_remove_tlb_entry(tlb, ptep, addr); \ 237 237 } while (0) 238 238 239 - #define pte_free_tlb(tlb, ptep) \ 239 + #define pte_free_tlb(tlb, ptep, address) \ 240 240 do { \ 241 241 tlb->need_flush = 1; \ 242 - __pte_free_tlb(tlb, ptep); \ 242 + __pte_free_tlb(tlb, ptep, address); \ 243 243 } while (0) 244 244 245 - #define pmd_free_tlb(tlb, ptep) \ 245 + #define pmd_free_tlb(tlb, ptep, address) \ 246 246 do { \ 247 247 tlb->need_flush = 1; \ 248 - __pmd_free_tlb(tlb, ptep); \ 248 + __pmd_free_tlb(tlb, ptep, address); \ 249 249 } while (0) 250 250 251 - #define pud_free_tlb(tlb, pudp) \ 251 + #define pud_free_tlb(tlb, pudp, address) \ 252 252 do { \ 253 253 tlb->need_flush = 1; \ 254 - __pud_free_tlb(tlb, pudp); \ 254 + __pud_free_tlb(tlb, pudp, address); \ 255 255 } while (0) 256 256 257 257 #endif /* _ASM_IA64_TLB_H */
+2 -2
arch/m32r/include/asm/pgalloc.h
··· 58 58 __free_page(pte); 59 59 } 60 60 61 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 61 + #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) 62 62 63 63 /* 64 64 * allocating and freeing a pmd is trivial: the 1-entry pmd is ··· 68 68 69 69 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 70 70 #define pmd_free(mm, x) do { } while (0) 71 - #define __pmd_free_tlb(tlb, x) do { } while (0) 71 + #define __pmd_free_tlb(tlb, x, addr) do { } while (0) 72 72 #define pgd_populate(mm, pmd, pte) BUG() 73 73 74 74 #define check_pgt_cache() do { } while (0)
+4 -2
arch/m68k/include/asm/motorola_pgalloc.h
··· 54 54 __free_page(page); 55 55 } 56 56 57 - static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page) 57 + static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, 58 + unsigned long address) 58 59 { 59 60 pgtable_page_dtor(page); 60 61 cache_page(kmap(page)); ··· 74 73 return free_pointer_table(pmd); 75 74 } 76 75 77 - static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 76 + static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 77 + unsigned long address) 78 78 { 79 79 return free_pointer_table(pmd); 80 80 }
+2 -2
arch/m68k/include/asm/sun3_pgalloc.h
··· 32 32 __free_page(page); 33 33 } 34 34 35 - #define __pte_free_tlb(tlb,pte) \ 35 + #define __pte_free_tlb(tlb,pte,addr) \ 36 36 do { \ 37 37 pgtable_page_dtor(pte); \ 38 38 tlb_remove_page((tlb), pte); \ ··· 80 80 * inside the pgd, so has no extra memory associated with it. 81 81 */ 82 82 #define pmd_free(mm, x) do { } while (0) 83 - #define __pmd_free_tlb(tlb, x) do { } while (0) 83 + #define __pmd_free_tlb(tlb, x, addr) do { } while (0) 84 84 85 85 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 86 86 {
+2 -2
arch/microblaze/include/asm/pgalloc.h
··· 180 180 __free_page(ptepage); 181 181 } 182 182 183 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 183 + #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) 184 184 185 185 #define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte)) 186 186 ··· 193 193 */ 194 194 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 195 195 /*#define pmd_free(mm, x) do { } while (0)*/ 196 - #define __pmd_free_tlb(tlb, x) do { } while (0) 196 + #define __pmd_free_tlb(tlb, x, addr) do { } while (0) 197 197 #define pgd_populate(mm, pmd, pte) BUG() 198 198 199 199 extern int do_check_pgt_cache(int, int);
+3 -3
arch/mips/include/asm/pgalloc.h
··· 98 98 __free_pages(pte, PTE_ORDER); 99 99 } 100 100 101 - #define __pte_free_tlb(tlb,pte) \ 101 + #define __pte_free_tlb(tlb,pte,address) \ 102 102 do { \ 103 103 pgtable_page_dtor(pte); \ 104 104 tlb_remove_page((tlb), pte); \ ··· 111 111 * inside the pgd, so has no extra memory associated with it. 112 112 */ 113 113 #define pmd_free(mm, x) do { } while (0) 114 - #define __pmd_free_tlb(tlb, x) do { } while (0) 114 + #define __pmd_free_tlb(tlb, x, addr) do { } while (0) 115 115 116 116 #endif 117 117 ··· 132 132 free_pages((unsigned long)pmd, PMD_ORDER); 133 133 } 134 134 135 - #define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x) 135 + #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) 136 136 137 137 #endif 138 138
+1 -1
arch/mn10300/include/asm/pgalloc.h
··· 51 51 } 52 52 53 53 54 - #define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) 54 + #define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte)) 55 55 56 56 #endif /* _ASM_PGALLOC_H */
+2 -2
arch/parisc/include/asm/tlb.h
··· 21 21 22 22 #include <asm-generic/tlb.h> 23 23 24 - #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 25 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 24 + #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 25 + #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) 26 26 27 27 #endif
+1 -1
arch/powerpc/include/asm/pgalloc-32.h
··· 16 16 */ 17 17 /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ 18 18 #define pmd_free(mm, x) do { } while (0) 19 - #define __pmd_free_tlb(tlb,x) do { } while (0) 19 + #define __pmd_free_tlb(tlb,x,a) do { } while (0) 20 20 /* #define pgd_populate(mm, pmd, pte) BUG() */ 21 21 22 22 #ifndef CONFIG_BOOKE
+2 -2
arch/powerpc/include/asm/pgalloc-64.h
··· 118 118 kmem_cache_free(pgtable_cache[cachenum], p); 119 119 } 120 120 121 - #define __pmd_free_tlb(tlb, pmd) \ 121 + #define __pmd_free_tlb(tlb, pmd,addr) \ 122 122 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 123 123 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 124 124 #ifndef CONFIG_PPC_64K_PAGES 125 - #define __pud_free_tlb(tlb, pud) \ 125 + #define __pud_free_tlb(tlb, pud, addr) \ 126 126 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 127 127 PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 128 128 #endif /* CONFIG_PPC_64K_PAGES */
+3 -3
arch/powerpc/include/asm/pgalloc.h
··· 38 38 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); 39 39 40 40 #ifdef CONFIG_SMP 41 - #define __pte_free_tlb(tlb,ptepage) \ 41 + #define __pte_free_tlb(tlb,ptepage,address) \ 42 42 do { \ 43 43 pgtable_page_dtor(ptepage); \ 44 44 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ 45 - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ 45 + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ 46 46 } while (0) 47 47 #else 48 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 48 + #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte)) 49 49 #endif 50 50 51 51
+2 -2
arch/powerpc/mm/hugetlbpage.c
··· 305 305 306 306 pmd = pmd_offset(pud, start); 307 307 pud_clear(pud); 308 - pmd_free_tlb(tlb, pmd); 308 + pmd_free_tlb(tlb, pmd, start); 309 309 } 310 310 311 311 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, ··· 348 348 349 349 pud = pud_offset(pgd, start); 350 350 pgd_clear(pgd); 351 - pud_free_tlb(tlb, pud); 351 + pud_free_tlb(tlb, pud, start); 352 352 } 353 353 354 354 /*
+6 -3
arch/s390/include/asm/tlb.h
··· 96 96 * pte_free_tlb frees a pte table and clears the CRSTE for the 97 97 * page table from the tlb. 98 98 */ 99 - static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) 99 + static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 100 + unsigned long address) 100 101 { 101 102 if (!tlb->fullmm) { 102 103 tlb->array[tlb->nr_ptes++] = pte; ··· 114 113 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB 115 114 * to avoid the double free of the pmd in this case. 116 115 */ 117 - static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 116 + static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 117 + unsigned long address) 118 118 { 119 119 #ifdef __s390x__ 120 120 if (tlb->mm->context.asce_limit <= (1UL << 31)) ··· 136 134 * as the pgd. pud_free_tlb checks the asce_limit against 4TB 137 135 * to avoid the double free of the pud in this case. 138 136 */ 139 - static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 137 + static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 138 + unsigned long address) 140 139 { 141 140 #ifdef __s390x__ 142 141 if (tlb->mm->context.asce_limit <= (1UL << 42))
+2 -2
arch/sh/include/asm/pgalloc.h
··· 73 73 quicklist_free_page(QUICK_PT, NULL, pte); 74 74 } 75 75 76 - #define __pte_free_tlb(tlb,pte) \ 76 + #define __pte_free_tlb(tlb,pte,addr) \ 77 77 do { \ 78 78 pgtable_page_dtor(pte); \ 79 79 tlb_remove_page((tlb), (pte)); \ ··· 85 85 */ 86 86 87 87 #define pmd_free(mm, x) do { } while (0) 88 - #define __pmd_free_tlb(tlb,x) do { } while (0) 88 + #define __pmd_free_tlb(tlb,x,addr) do { } while (0) 89 89 90 90 static inline void check_pgt_cache(void) 91 91 {
+3 -3
arch/sh/include/asm/tlb.h
··· 91 91 } 92 92 93 93 #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 94 - #define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) 95 - #define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) 96 - #define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp) 94 + #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) 95 + #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 96 + #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) 97 97 98 98 #define tlb_migrate_finish(mm) do { } while (0) 99 99
+4 -4
arch/sparc/include/asm/pgalloc_32.h
··· 44 44 BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) 45 45 #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) 46 46 47 - #define pmd_free(mm, pmd) free_pmd_fast(pmd) 48 - #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 47 + #define pmd_free(mm, pmd) free_pmd_fast(pmd) 48 + #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 49 49 50 50 BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) 51 51 #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) ··· 62 62 #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) 63 63 64 64 BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) 65 - #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) 66 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 65 + #define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) 66 + #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) 67 67 68 68 #endif /* _SPARC_PGALLOC_H */
+3 -3
arch/sparc/include/asm/tlb_64.h
··· 100 100 } 101 101 102 102 #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0) 103 - #define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage) 104 - #define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp) 105 - #define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp) 103 + #define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage) 104 + #define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp) 105 + #define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr) 106 106 107 107 #define tlb_migrate_finish(mm) do { } while (0) 108 108 #define tlb_start_vma(tlb, vma) do { } while (0)
+2 -2
arch/um/include/asm/pgalloc.h
··· 40 40 __free_page(pte); 41 41 } 42 42 43 - #define __pte_free_tlb(tlb,pte) \ 43 + #define __pte_free_tlb(tlb,pte, address) \ 44 44 do { \ 45 45 pgtable_page_dtor(pte); \ 46 46 tlb_remove_page((tlb),(pte)); \ ··· 53 53 free_page((unsigned long)pmd); 54 54 } 55 55 56 - #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) 56 + #define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x)) 57 57 #endif 58 58 59 59 #define check_pgt_cache() do { } while (0)
+3 -3
arch/um/include/asm/tlb.h
··· 116 116 __tlb_remove_tlb_entry(tlb, ptep, address); \ 117 117 } while (0) 118 118 119 - #define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep) 119 + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) 120 120 121 - #define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp) 121 + #define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr) 122 122 123 - #define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp) 123 + #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) 124 124 125 125 #define tlb_migrate_finish(mm) do {} while (0) 126 126
+22 -3
arch/x86/include/asm/pgalloc.h
··· 46 46 __free_page(pte); 47 47 } 48 48 49 - extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte); 49 + extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte); 50 + 51 + static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 52 + unsigned long address) 53 + { 54 + ___pte_free_tlb(tlb, pte); 55 + } 50 56 51 57 static inline void pmd_populate_kernel(struct mm_struct *mm, 52 58 pmd_t *pmd, pte_t *pte) ··· 84 78 free_page((unsigned long)pmd); 85 79 } 86 80 87 - extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); 81 + extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); 82 + 83 + static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 84 + unsigned long adddress) 85 + { 86 + ___pmd_free_tlb(tlb, pmd); 87 + } 88 88 89 89 #ifdef CONFIG_X86_PAE 90 90 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); ··· 120 108 free_page((unsigned long)pud); 121 109 } 122 110 123 - extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); 111 + extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); 112 + 113 + static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 114 + unsigned long address) 115 + { 116 + ___pud_free_tlb(tlb, pud); 117 + } 118 + 124 119 #endif /* PAGETABLE_LEVELS > 3 */ 125 120 #endif /* PAGETABLE_LEVELS > 2 */ 126 121
+3 -3
arch/x86/mm/pgtable.c
··· 25 25 return pte; 26 26 } 27 27 28 - void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) 28 + void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) 29 29 { 30 30 pgtable_page_dtor(pte); 31 31 paravirt_release_pte(page_to_pfn(pte)); ··· 33 33 } 34 34 35 35 #if PAGETABLE_LEVELS > 2 36 - void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 36 + void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 37 37 { 38 38 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); 39 39 tlb_remove_page(tlb, virt_to_page(pmd)); 40 40 } 41 41 42 42 #if PAGETABLE_LEVELS > 3 43 - void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 43 + void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 44 44 { 45 45 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); 46 46 tlb_remove_page(tlb, virt_to_page(pud));
+1 -1
arch/xtensa/include/asm/tlb.h
··· 42 42 43 43 #include <asm-generic/tlb.h> 44 44 45 - #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 45 + #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 46 46 47 47 #endif /* _XTENSA_TLB_H */
+2 -2
include/asm-generic/4level-fixup.h
··· 27 27 #define pud_page_vaddr(pud) pgd_page_vaddr(pud) 28 28 29 29 #undef pud_free_tlb 30 - #define pud_free_tlb(tlb, x) do { } while (0) 30 + #define pud_free_tlb(tlb, x, addr) do { } while (0) 31 31 #define pud_free(mm, x) do { } while (0) 32 - #define __pud_free_tlb(tlb, x) do { } while (0) 32 + #define __pud_free_tlb(tlb, x, addr) do { } while (0) 33 33 34 34 #undef pud_addr_end 35 35 #define pud_addr_end(addr, end) (end)
+1 -1
include/asm-generic/pgtable-nopmd.h
··· 59 59 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 60 60 { 61 61 } 62 - #define __pmd_free_tlb(tlb, x) do { } while (0) 62 + #define __pmd_free_tlb(tlb, x, a) do { } while (0) 63 63 64 64 #undef pmd_addr_end 65 65 #define pmd_addr_end(addr, end) (end)
+1 -1
include/asm-generic/pgtable-nopud.h
··· 52 52 */ 53 53 #define pud_alloc_one(mm, address) NULL 54 54 #define pud_free(mm, x) do { } while (0) 55 - #define __pud_free_tlb(tlb, x) do { } while (0) 55 + #define __pud_free_tlb(tlb, x, a) do { } while (0) 56 56 57 57 #undef pud_addr_end 58 58 #define pud_addr_end(addr, end) (end)
+6 -6
include/asm-generic/tlb.h
··· 123 123 __tlb_remove_tlb_entry(tlb, ptep, address); \ 124 124 } while (0) 125 125 126 - #define pte_free_tlb(tlb, ptep) \ 126 + #define pte_free_tlb(tlb, ptep, address) \ 127 127 do { \ 128 128 tlb->need_flush = 1; \ 129 - __pte_free_tlb(tlb, ptep); \ 129 + __pte_free_tlb(tlb, ptep, address); \ 130 130 } while (0) 131 131 132 132 #ifndef __ARCH_HAS_4LEVEL_HACK 133 - #define pud_free_tlb(tlb, pudp) \ 133 + #define pud_free_tlb(tlb, pudp, address) \ 134 134 do { \ 135 135 tlb->need_flush = 1; \ 136 - __pud_free_tlb(tlb, pudp); \ 136 + __pud_free_tlb(tlb, pudp, address); \ 137 137 } while (0) 138 138 #endif 139 139 140 - #define pmd_free_tlb(tlb, pmdp) \ 140 + #define pmd_free_tlb(tlb, pmdp, address) \ 141 141 do { \ 142 142 tlb->need_flush = 1; \ 143 - __pmd_free_tlb(tlb, pmdp); \ 143 + __pmd_free_tlb(tlb, pmdp, address); \ 144 144 } while (0) 145 145 146 146 #define tlb_migrate_finish(mm) do {} while (0)
+6 -5
mm/memory.c
··· 135 135 * Note: this doesn't free the actual pages themselves. That 136 136 * has been handled earlier when unmapping all the memory regions. 137 137 */ 138 - static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) 138 + static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, 139 + unsigned long addr) 139 140 { 140 141 pgtable_t token = pmd_pgtable(*pmd); 141 142 pmd_clear(pmd); 142 - pte_free_tlb(tlb, token); 143 + pte_free_tlb(tlb, token, addr); 143 144 tlb->mm->nr_ptes--; 144 145 } 145 146 ··· 158 157 next = pmd_addr_end(addr, end); 159 158 if (pmd_none_or_clear_bad(pmd)) 160 159 continue; 161 - free_pte_range(tlb, pmd); 160 + free_pte_range(tlb, pmd, addr); 162 161 } while (pmd++, addr = next, addr != end); 163 162 164 163 start &= PUD_MASK; ··· 174 173 175 174 pmd = pmd_offset(pud, start); 176 175 pud_clear(pud); 177 - pmd_free_tlb(tlb, pmd); 176 + pmd_free_tlb(tlb, pmd, start); 178 177 } 179 178 180 179 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, ··· 207 206 208 207 pud = pud_offset(pgd, start); 209 208 pgd_clear(pgd); 210 - pud_free_tlb(tlb, pud); 209 + pud_free_tlb(tlb, pud, start); 211 210 } 212 211 213 212 /*