/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern void __load_new_mm_context(struct mm_struct *);


__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}


#define flush_tlb_current		ev5_flush_tlb_current
#define flush_tlb_current_page		ev5_flush_tlb_current_page

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ALPHA_TLBFLUSH_H */