Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.21-rc7 146 lines 4.1 kB view raw
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing primitives for powerpc:
 *
 *  - flush_tlb_mm(mm)			flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr)	flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end)	flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

struct mm_struct;

#ifdef CONFIG_PPC64

#include <linux/percpu.h>
#include <asm/page.h>

/* Number of (pte, vaddr) pairs buffered per CPU before a forced flush. */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of pending hash-table invalidations.  Entries accumulate
 * here and are pushed out together by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	real_pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
	unsigned int psize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

/*
 * Flush this CPU's batched TLB invalidations, if any are queued.
 * get_cpu_var/put_cpu_var bracket the access so we stay on one CPU
 * while inspecting and draining the per-CPU batch.
 */
static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *this_batch = &get_cpu_var(ppc64_tlb_batch);

	if (this_batch->index)
		__flush_tlb_pending(this_batch);
	put_cpu_var(ppc64_tlb_batch);
}

extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int local);
extern void flush_hash_range(unsigned long number, int local);

#else /* CONFIG_PPC64 */

#include <linux/mm.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

/*
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
/* 40x and 8xx: invalidate the whole TLB directly with tlbia. */
#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
/* 44x and Freescale BookE: go through the _tlbia() helper. */
#define flush_tlb_pending()	_tlbia()
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)

/*
 * On these platforms every flavour of flush funnels into
 * flush_tlb_pending(); single-page flushes on 32-bit use tlbie.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_pending();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
#ifdef CONFIG_PPC64
	flush_tlb_pending();
#else
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
#ifndef CONFIG_PPC64
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_pending();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_pending();
}

#else /* 6xx, 7xx, 7xxx cpus */

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start,
				   unsigned long end);

#endif

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages. -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */