Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

remove unused flush_tlb_pgtables

Nobody uses flush_tlb_pgtables anymore; this patch removes all remaining
traces of it from all archs.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Benjamin Herrenschmidt and committed by Linus Torvalds
1c7037db 22124c99

+2 -195
+2 -25
Documentation/cachetlb.txt
··· 87 87 88 88 This is used primarily during fault processing. 89 89 90 - 5) void flush_tlb_pgtables(struct mm_struct *mm, 91 - unsigned long start, unsigned long end) 92 - 93 - The software page tables for address space 'mm' for virtual 94 - addresses in the range 'start' to 'end-1' are being torn down. 95 - 96 - Some platforms cache the lowest level of the software page tables 97 - in a linear virtually mapped array, to make TLB miss processing 98 - more efficient. On such platforms, since the TLB is caching the 99 - software page table structure, it needs to be flushed when parts 100 - of the software page table tree are unlinked/freed. 101 - 102 - Sparc64 is one example of a platform which does this. 103 - 104 - Usually, when munmap()'ing an area of user virtual address 105 - space, the kernel leaves the page table parts around and just 106 - marks the individual pte's as invalid. However, if very large 107 - portions of the address space are unmapped, the kernel frees up 108 - those portions of the software page tables to prevent potential 109 - excessive kernel memory usage caused by erratic mmap/mmunmap 110 - sequences. It is at these times that flush_tlb_pgtables will 111 - be invoked. 112 - 113 - 6) void update_mmu_cache(struct vm_area_struct *vma, 90 + 5) void update_mmu_cache(struct vm_area_struct *vma, 114 91 unsigned long address, pte_t pte) 115 92 116 93 At the end of every page fault, this routine is invoked to ··· 100 123 translations for software managed TLB configurations. 101 124 The sparc64 port currently does this. 102 125 103 - 7) void tlb_migrate_finish(struct mm_struct *mm) 126 + 6) void tlb_migrate_finish(struct mm_struct *mm) 104 127 105 128 This interface is called at the end of an explicit 106 129 process migration. This interface provides a hook
-11
include/asm-alpha/tlbflush.h
··· 92 92 if (*mmc) *mmc = 0; 93 93 } 94 94 95 - /* Flush a specified range of user mapping page tables from TLB. 96 - Although Alpha uses VPTE caches, this can be a nop, as Alpha does 97 - not have finegrained tlb flushing, so it will flush VPTE stuff 98 - during next flush_tlb_range. */ 99 - 100 - static inline void 101 - flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, 102 - unsigned long end) 103 - { 104 - } 105 - 106 95 #ifndef CONFIG_SMP 107 96 /* Flush everything (kernel mapping may also have changed 108 97 due to vmalloc/vfree). */
-5
include/asm-arm/tlbflush.h
··· 463 463 */ 464 464 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); 465 465 466 - /* 467 - * ARM processors do not cache TLB tables in RAM. 468 - */ 469 - #define flush_tlb_pgtables(mm,start,end) do { } while (0) 470 - 471 466 #endif 472 467 473 468 #endif /* CONFIG_MMU */
-7
include/asm-avr32/tlbflush.h
··· 19 19 * - flush_tlb_page(vma, vmaddr) flushes one page 20 20 * - flush_tlb_range(vma, start, end) flushes a range of pages 21 21 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 22 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 23 22 */ 24 23 extern void flush_tlb(void); 25 24 extern void flush_tlb_all(void); ··· 27 28 unsigned long end); 28 29 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 29 30 extern void __flush_tlb_page(unsigned long asid, unsigned long page); 30 - 31 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 32 - unsigned long start, unsigned long end) 33 - { 34 - /* Nothing to do */ 35 - } 36 31 37 32 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 38 33
-6
include/asm-blackfin/tlbflush.h
··· 53 53 BUG(); 54 54 } 55 55 56 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 57 - unsigned long start, unsigned long end) 58 - { 59 - BUG(); 60 - } 61 - 62 56 #endif
-7
include/asm-cris/tlbflush.h
··· 38 38 flush_tlb_mm(vma->vm_mm); 39 39 } 40 40 41 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 42 - unsigned long start, unsigned long end) 43 - { 44 - /* CRIS does not keep any page table caches in TLB */ 45 - } 46 - 47 - 48 41 static inline void flush_tlb(void) 49 42 { 50 43 flush_tlb_mm(current->mm);
-2
include/asm-frv/tlbflush.h
··· 57 57 #define __flush_tlb_global() flush_tlb_all() 58 58 #define flush_tlb() flush_tlb_all() 59 59 #define flush_tlb_kernel_range(start, end) flush_tlb_all() 60 - #define flush_tlb_pgtables(mm,start,end) do { } while(0) 61 60 62 61 #else 63 62 ··· 65 66 #define flush_tlb_mm(mm) BUG() 66 67 #define flush_tlb_page(vma,addr) BUG() 67 68 #define flush_tlb_range(mm,start,end) BUG() 68 - #define flush_tlb_pgtables(mm,start,end) BUG() 69 69 #define flush_tlb_kernel_range(start, end) BUG() 70 70 71 71 #endif
-6
include/asm-h8300/tlbflush.h
··· 52 52 BUG(); 53 53 } 54 54 55 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 56 - unsigned long start, unsigned long end) 57 - { 58 - BUG(); 59 - } 60 - 61 55 #endif /* _H8300_TLBFLUSH_H */
-13
include/asm-ia64/tlbflush.h
··· 84 84 } 85 85 86 86 /* 87 - * Flush the TLB entries mapping the virtually mapped linear page 88 - * table corresponding to address range [START-END). 89 - */ 90 - static inline void 91 - flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end) 92 - { 93 - /* 94 - * Deprecated. The virtual page table is now flushed via the normal gather/flush 95 - * interface (see tlb.h). 96 - */ 97 - } 98 - 99 - /* 100 87 * Flush the local TLB. Invoked from another cpu using an IPI. 101 88 */ 102 89 #ifdef CONFIG_SMP
-3
include/asm-m32r/tlbflush.h
··· 12 12 * - flush_tlb_page(vma, vmaddr) flushes one page 13 13 * - flush_tlb_range(vma, start, end) flushes a range of pages 14 14 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 15 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 16 15 */ 17 16 18 17 extern void local_flush_tlb_all(void); ··· 91 92 : "i" (MTOP) : "memory" 92 93 ); 93 94 } 94 - 95 - #define flush_tlb_pgtables(mm, start, end) do { } while (0) 96 95 97 96 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); 98 97
-10
include/asm-m68k/tlbflush.h
··· 92 92 flush_tlb_all(); 93 93 } 94 94 95 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 96 - unsigned long start, unsigned long end) 97 - { 98 - } 99 - 100 95 #else 101 96 102 97 ··· 212 217 static inline void flush_tlb_kernel_page (unsigned long addr) 213 218 { 214 219 sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG); 215 - } 216 - 217 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 218 - unsigned long start, unsigned long end) 219 - { 220 220 } 221 221 222 222 #endif
-6
include/asm-m68knommu/tlbflush.h
··· 52 52 BUG(); 53 53 } 54 54 55 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 56 - unsigned long start, unsigned long end) 57 - { 58 - BUG(); 59 - } 60 - 61 55 #endif /* _M68KNOMMU_TLBFLUSH_H */
-7
include/asm-mips/tlbflush.h
··· 11 11 * - flush_tlb_page(vma, vmaddr) flushes one page 12 12 * - flush_tlb_range(vma, start, end) flushes a range of pages 13 13 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 14 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 15 14 */ 16 15 extern void local_flush_tlb_all(void); 17 16 extern void local_flush_tlb_mm(struct mm_struct *mm); ··· 43 44 #define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) 44 45 45 46 #endif /* CONFIG_SMP */ 46 - 47 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 48 - unsigned long start, unsigned long end) 49 - { 50 - /* Nothing to do on MIPS. */ 51 - } 52 47 53 48 #endif /* __ASM_TLBFLUSH_H */
-4
include/asm-parisc/tlbflush.h
··· 57 57 #endif 58 58 } 59 59 60 - extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) 61 - { 62 - } 63 - 64 60 static inline void flush_tlb_page(struct vm_area_struct *vma, 65 61 unsigned long addr) 66 62 {
-11
include/asm-powerpc/tlbflush.h
··· 8 8 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB 9 9 * - flush_tlb_range(vma, start, end) flushes a range of pages 10 10 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 11 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 12 11 * 13 12 * This program is free software; you can redistribute it and/or 14 13 * modify it under the terms of the GNU General Public License ··· 172 173 * waiting for the inevitable extra hash-table miss exception. 173 174 */ 174 175 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); 175 - 176 - /* 177 - * This is called in munmap when we have freed up some page-table 178 - * pages. We don't need to do anything here, there's nothing special 179 - * about our page-table pages. -- paulus 180 - */ 181 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 182 - unsigned long start, unsigned long end) 183 - { 184 - } 185 176 186 177 #endif /*__KERNEL__ */ 187 178 #endif /* _ASM_POWERPC_TLBFLUSH_H */
-7
include/asm-s390/tlbflush.h
··· 14 14 * - flush_tlb_page(vma, vmaddr) flushes one page 15 15 * - flush_tlb_range(vma, start, end) flushes a range of pages 16 16 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 17 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 18 17 */ 19 18 20 19 /* ··· 150 151 #define flush_tlb_kernel_range(start, end) global_flush_tlb() 151 152 152 153 #endif 153 - 154 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 155 - unsigned long start, unsigned long end) 156 - { 157 - /* S/390 does not keep any page table caches in TLB */ 158 - } 159 154 160 155 #endif /* _S390_TLBFLUSH_H */
-6
include/asm-sh/tlbflush.h
··· 9 9 * - flush_tlb_page(vma, vmaddr) flushes one page 10 10 * - flush_tlb_range(vma, start, end) flushes a range of pages 11 11 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 12 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 13 12 */ 14 13 extern void local_flush_tlb_all(void); 15 14 extern void local_flush_tlb_mm(struct mm_struct *mm); ··· 46 47 47 48 #endif /* CONFIG_SMP */ 48 49 49 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 50 - unsigned long start, unsigned long end) 51 - { 52 - /* Nothing to do */ 53 - } 54 50 #endif /* __ASM_SH_TLBFLUSH_H */
-4
include/asm-sh64/tlbflush.h
··· 20 20 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 21 21 unsigned long end); 22 22 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 23 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 24 - unsigned long start, unsigned long end) 25 - { 26 - } 27 23 28 24 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 29 25
-6
include/asm-sparc/tlbflush.h
··· 13 13 * - flush_tlb_page(vma, vmaddr) flushes one page 14 14 * - flush_tlb_range(vma, start, end) flushes a range of pages 15 15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 16 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 17 16 */ 18 17 19 18 #ifdef CONFIG_SMP ··· 40 41 BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *) 41 42 BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long) 42 43 BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long) 43 - 44 - // Thanks to Anton Blanchard, our pagetables became uncached in 2.4. Wee! 45 - // extern void flush_tlb_pgtables(struct mm_struct *mm, 46 - // unsigned long start, unsigned long end); 47 - #define flush_tlb_pgtables(mm, start, end) do{ }while(0) 48 44 49 45 #define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)() 50 46 #define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
-7
include/asm-sparc64/tlbflush.h
··· 41 41 42 42 #endif /* ! CONFIG_SMP */ 43 43 44 - static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) 45 - { 46 - /* We don't use virtual page tables for TLB miss processing 47 - * any more. Nowadays we use the TSB. 48 - */ 49 - } 50 - 51 44 #endif /* _SPARC64_TLBFLUSH_H */
-6
include/asm-um/tlbflush.h
··· 17 17 * - flush_tlb_page(vma, vmaddr) flushes one page 18 18 * - flush_tlb_kernel_vm() flushes the kernel vm area 19 19 * - flush_tlb_range(vma, start, end) flushes a range of pages 20 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 21 20 */ 22 21 23 22 extern void flush_tlb_all(void); ··· 27 28 extern void flush_tlb_kernel_vm(void); 28 29 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 29 30 extern void __flush_tlb_one(unsigned long addr); 30 - 31 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 32 - unsigned long start, unsigned long end) 33 - { 34 - } 35 31 36 32 #endif
-6
include/asm-v850/tlbflush.h
··· 61 61 BUG (); 62 62 } 63 63 64 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 65 - unsigned long start, unsigned long end) 66 - { 67 - BUG (); 68 - } 69 - 70 64 #endif /* __V850_TLBFLUSH_H__ */
-7
include/asm-x86/tlbflush_32.h
··· 78 78 * - flush_tlb_page(vma, vmaddr) flushes one page 79 79 * - flush_tlb_range(vma, start, end) flushes a range of pages 80 80 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 81 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 82 81 * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus 83 82 * 84 83 * ..but the i386 has somewhat limited tlb flushing capabilities, ··· 163 164 unsigned long end) 164 165 { 165 166 flush_tlb_all(); 166 - } 167 - 168 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 169 - unsigned long start, unsigned long end) 170 - { 171 - /* i386 does not keep any page table caches in TLB */ 172 167 } 173 168 174 169 #endif /* _I386_TLBFLUSH_H */
-9
include/asm-x86/tlbflush_64.h
··· 31 31 * - flush_tlb_page(vma, vmaddr) flushes one page 32 32 * - flush_tlb_range(vma, start, end) flushes a range of pages 33 33 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 34 - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 35 34 * 36 35 * x86-64 can only flush individual pages or full VMs. For a range flush 37 36 * we always do the full VM. Might be worth trying if for a small ··· 95 96 unsigned long end) 96 97 { 97 98 flush_tlb_all(); 98 - } 99 - 100 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 101 - unsigned long start, unsigned long end) 102 - { 103 - /* x86_64 does not keep any page table caches in a software TLB. 104 - The CPUs do in their hardware TLBs, but they are handled 105 - by the normal TLB flushing algorithms. */ 106 99 } 107 100 108 101 #endif /* _X8664_TLBFLUSH_H */
-11
include/asm-xtensa/tlbflush.h
··· 41 41 42 42 #define flush_tlb_kernel_range(start,end) flush_tlb_all() 43 43 44 - 45 - /* This is calld in munmap when we have freed up some page-table pages. 46 - * We don't need to do anything here, there's nothing special about our 47 - * page-table pages. 48 - */ 49 - 50 - static inline void flush_tlb_pgtables(struct mm_struct *mm, 51 - unsigned long start, unsigned long end) 52 - { 53 - } 54 - 55 44 /* TLB operations. */ 56 45 57 46 static inline unsigned long itlb_probe(unsigned long addr)
-3
mm/memory.c
··· 259 259 continue; 260 260 free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 261 261 } while (pgd++, addr = next, addr != end); 262 - 263 - if (!(*tlb)->fullmm) 264 - flush_tlb_pgtables((*tlb)->mm, start, end); 265 262 } 266 263 267 264 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,