Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Local TLB flushing variants for SMP prep.

Rename the existing flush routines to local_ variants for use by
the IPI-backed global flush routines on SMP.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+52 -36
+0 -1
arch/sh/kernel/sh_ksyms.c
··· 105 105 EXPORT_SYMBOL(clear_user_page); 106 106 #endif 107 107 108 - EXPORT_SYMBOL(flush_tlb_page); 109 108 EXPORT_SYMBOL(__down_trylock); 110 109 111 110 #ifdef CONFIG_SMP
+1 -1
arch/sh/mm/init.c
··· 106 106 107 107 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); 108 108 109 - __flush_tlb_page(get_asid(), addr); 109 + flush_tlb_one(get_asid(), addr); 110 110 } 111 111 112 112 /*
+2 -2
arch/sh/mm/pg-sh4.c
··· 39 39 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); 40 40 set_pte(pte, entry); 41 41 local_irq_save(flags); 42 - __flush_tlb_page(get_asid(), p3_addr); 42 + flush_tlb_one(get_asid(), p3_addr); 43 43 local_irq_restore(flags); 44 44 update_mmu_cache(NULL, p3_addr, entry); 45 45 __clear_user_page((void *)p3_addr, to); ··· 74 74 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); 75 75 set_pte(pte, entry); 76 76 local_irq_save(flags); 77 - __flush_tlb_page(get_asid(), p3_addr); 77 + flush_tlb_one(get_asid(), p3_addr); 78 78 local_irq_restore(flags); 79 79 update_mmu_cache(NULL, p3_addr, entry); 80 80 __copy_user_page((void *)p3_addr, from, to);
+10 -10
arch/sh/mm/tlb-flush.c
··· 14 14 #include <asm/tlbflush.h> 15 15 #include <asm/cacheflush.h> 16 16 17 - void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 17 + void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 18 18 { 19 19 unsigned int cpu = smp_processor_id(); 20 20 ··· 31 31 saved_asid = get_asid(); 32 32 set_asid(asid); 33 33 } 34 - __flush_tlb_page(asid, page); 34 + flush_tlb_one(asid, page); 35 35 if (saved_asid != MMU_NO_ASID) 36 36 set_asid(saved_asid); 37 37 local_irq_restore(flags); 38 38 } 39 39 } 40 40 41 - void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 42 - unsigned long end) 41 + void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 42 + unsigned long end) 43 43 { 44 44 struct mm_struct *mm = vma->vm_mm; 45 45 unsigned int cpu = smp_processor_id(); ··· 67 67 set_asid(asid); 68 68 } 69 69 while (start < end) { 70 - __flush_tlb_page(asid, start); 70 + flush_tlb_one(asid, start); 71 71 start += PAGE_SIZE; 72 72 } 73 73 if (saved_asid != MMU_NO_ASID) ··· 77 77 } 78 78 } 79 79 80 - void flush_tlb_kernel_range(unsigned long start, unsigned long end) 80 + void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) 81 81 { 82 82 unsigned int cpu = smp_processor_id(); 83 83 unsigned long flags; ··· 86 86 local_irq_save(flags); 87 87 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 88 88 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ 89 - flush_tlb_all(); 89 + local_flush_tlb_all(); 90 90 } else { 91 91 unsigned long asid; 92 92 unsigned long saved_asid = get_asid(); ··· 97 97 end &= PAGE_MASK; 98 98 set_asid(asid); 99 99 while (start < end) { 100 - __flush_tlb_page(asid, start); 100 + flush_tlb_one(asid, start); 101 101 start += PAGE_SIZE; 102 102 } 103 103 set_asid(saved_asid); ··· 105 105 local_irq_restore(flags); 106 106 } 107 107 108 - void flush_tlb_mm(struct mm_struct *mm) 108 + void local_flush_tlb_mm(struct mm_struct *mm) 109 109 { 110 110 unsigned int cpu = smp_processor_id(); 111 111 ··· 122 122 } 123 123 } 124 124 125 - void flush_tlb_all(void) 125 + void local_flush_tlb_all(void) 126 126 { 127 127 unsigned long flags, status; 128 128
+6 -13
arch/sh/mm/tlb-nommu.c
··· 13 13 /* 14 14 * Nothing too terribly exciting here .. 15 15 */ 16 - 17 - void flush_tlb(void) 16 + void local_flush_tlb_all(void) 18 17 { 19 18 BUG(); 20 19 } 21 20 22 - void flush_tlb_all(void) 21 + void local_flush_tlb_mm(struct mm_struct *mm) 23 22 { 24 23 BUG(); 25 24 } 26 25 27 - void flush_tlb_mm(struct mm_struct *mm) 28 - { 29 - BUG(); 30 - } 31 - 32 - void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 26 + void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 33 27 unsigned long end) 34 28 { 35 29 BUG(); 36 30 } 37 31 38 - void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 32 + void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 39 33 { 40 34 BUG(); 41 35 } 42 36 43 - void __flush_tlb_page(unsigned long asid, unsigned long page) 37 + void local_flush_tlb_one(unsigned long asid, unsigned long page) 44 38 { 45 39 BUG(); 46 40 } 47 41 48 - void flush_tlb_kernel_range(unsigned long start, unsigned long end) 42 + void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) 49 43 { 50 44 BUG(); 51 45 } ··· 49 55 { 50 56 BUG(); 51 57 } 52 -
+1 -1
arch/sh/mm/tlb-sh3.c
··· 12 12 #include <asm/system.h> 13 13 #include <asm/mmu_context.h> 14 14 15 - void __flush_tlb_page(unsigned long asid, unsigned long page) 15 + void local_flush_tlb_one(unsigned long asid, unsigned long page) 16 16 { 17 17 unsigned long addr, data; 18 18 int i, ways = MMU_NTLB_WAYS;
+1 -1
arch/sh/mm/tlb-sh4.c
··· 12 12 #include <asm/system.h> 13 13 #include <asm/mmu_context.h> 14 14 15 - void __flush_tlb_page(unsigned long asid, unsigned long page) 15 + void local_flush_tlb_one(unsigned long asid, unsigned long page) 16 16 { 17 17 unsigned long addr, data; 18 18
+31 -7
include/asm-sh/tlbflush.h
··· 4 4 /* 5 5 * TLB flushing: 6 6 * 7 - * - flush_tlb() flushes the current mm struct TLBs 8 7 * - flush_tlb_all() flushes all processes TLBs 9 8 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 10 9 * - flush_tlb_page(vma, vmaddr) flushes one page ··· 11 12 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 12 13 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 13 14 */ 15 + extern void local_flush_tlb_all(void); 16 + extern void local_flush_tlb_mm(struct mm_struct *mm); 17 + extern void local_flush_tlb_range(struct vm_area_struct *vma, 18 + unsigned long start, 19 + unsigned long end); 20 + extern void local_flush_tlb_page(struct vm_area_struct *vma, 21 + unsigned long page); 22 + extern void local_flush_tlb_kernel_range(unsigned long start, 23 + unsigned long end); 24 + extern void local_flush_tlb_one(unsigned long asid, unsigned long page); 14 25 15 - extern void flush_tlb(void); 26 + #ifdef CONFIG_SMP 27 + 16 28 extern void flush_tlb_all(void); 17 29 extern void flush_tlb_mm(struct mm_struct *mm); 18 30 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 19 31 unsigned long end); 20 32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 21 - extern void __flush_tlb_page(unsigned long asid, unsigned long page); 33 + extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 34 + extern void flush_tlb_one(unsigned long asid, unsigned long page); 35 + 36 + #else 37 + 38 + #define flush_tlb_all() local_flush_tlb_all() 39 + #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 40 + #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) 41 + #define flush_tlb_one(asid, page) local_flush_tlb_one(asid, page) 42 + 43 + #define flush_tlb_range(vma, start, end) \ 44 + local_flush_tlb_range(vma, start, end) 45 + 46 + #define flush_tlb_kernel_range(start, end) \ 47 + local_flush_tlb_kernel_range(start, end) 48 + 49 + #endif /* CONFIG_SMP */ 22 50 23 51 static inline void flush_tlb_pgtables(struct mm_struct *mm, 24 52 unsigned long start, unsigned long end) 25 - { /* Nothing to do */ 53 + { 54 + /* Nothing to do */ 26 55 } 27 - 28 - extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 29 - 30 56 #endif /* __ASM_SH_TLBFLUSH_H */