Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
At tag v3.0-rc7 — 177 lines, 5.0 kB (view raw)
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm, full) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

/*
 * Exactly one of the three MMU families below is selected at configure
 * time; each provides the full flush_tlb_* interface listed above.
 */
#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

/* Forward declarations: pointers only are used here, no core-mm includes. */
struct vm_area_struct;
struct mm_struct;

/* Sentinel PID value meaning "no MMU context assigned" on nohash parts. */
#define MMU_NO_CONTEXT	((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

/* Low-level single-entry flush; tsize/ind select page size and the
 * indirect (hardware page-table) entry kind. */
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				   int tsize, int ind);

#ifdef CONFIG_SMP
/* SMP: cross-CPU variants are real functions (IPI or broadcast in .c). */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			     int tsize, int ind);
#else
/* UP: the global flushes degenerate to the local ones. */
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
#define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
#endif
/* On software-loaded TLBs every page flush is a real flush. */
#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)

#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/* 32-bit hash MMU has no local-only fast path: local == global flush. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

/* NOTE: differs from the nohash sentinel above — context 0 is the
 * "none" value on 64-bit hash. */
#define MMU_NO_CONTEXT	0

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

/* Max HPTE invalidations accumulated per CPU before a forced flush. */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-page-table invalidations, filled by
 * hpte_need_flush() and drained by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int			active;	/* set while inside lazy MMU mode */
	unsigned long		index;	/* number of entries queued below */
	struct mm_struct	*mm;	/* batch is per-mm; a new mm flushes */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;	/* page size of the batched entries */
	int			ssize;	/* segment size of the batched entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Drain a batch: invalidate all queued hash PTEs. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

/* Queue (or immediately perform) the flush for a PTE being changed. */
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Entering lazy MMU mode just arms batching on this CPU. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/* Leaving lazy MMU mode flushes whatever got batched meanwhile. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


/* Flush one (or, via flush_hash_range, several) entries from the hash
 * page table; 'local' selects tlbiel vs broadcast tlbie. */
extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * The generic flush hooks are no-ops on 64-bit hash: invalidation is
 * driven from PTE updates through hpte_need_flush()/the batch above,
 * so there is nothing left to do at these call sites.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);

#else
#error Unsupported MMU type
#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */