Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.25-rc1 177 lines 4.8 kB view raw
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

/* Invalidate the TLB entry matching (address, pid); implemented in asm. */
extern void _tlbie(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
/* These cores have a tlbia instruction: flush the whole TLB inline. */
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
/* No tlbia instruction here; out-of-line loop over TLB entries. */
extern void _tlbia(void);
#endif

/*
 * There is no selective per-mm invalidation on these parts: drop the
 * entire TLB.  Same for the range flushes below.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}

/* Flush one page; PID 0 (kernel context) is used when no vma is given. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

/* Identical to flush_tlb_page on software-loaded-TLB parts. */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-PTE invalidations; entries are queued
 * (up to PPC64_TLB_BATCH_NR) and flushed together by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int			active;		/* set while in lazy MMU mode */
	unsigned long		index;		/* number of queued entries */
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;		/* segment size */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush all invalidations queued in @batch and reset it. */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

/* Mark this CPU's batch active so PTE invalidations get queued, not issued. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

/* Leave lazy mode: push out any queued invalidations, then deactivate. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * The generic flush_tlb_* entry points are no-ops here: on hash-MMU
 * 64-bit, invalidation is presumably handled through the hpte_need_flush()
 * batching above rather than at these call sites — NOTE(review): confirm
 * against the hash_utils implementation.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);


#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */