Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.24 · 183 lines · 4.5 kB
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}

/*
 * Called by ptep_set_access_flags, must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash != 0)
		return;
	_tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 *    -- Cort
 */

/*
 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
 * the cache operations on the bus.  Hence we need to use an IPI
 * to get the other CPU(s) to invalidate their TLBs.
 */
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH	smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH	do { } while (0)
#endif

static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
	FINISH_FLUSH;
}

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
	FINISH_FLUSH;
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
	FINISH_FLUSH;
}

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
	FINISH_FLUSH;
}
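A side note on the rounding arithmetic in flush_range(): it rounds start down to a page boundary, rounds end up to the last byte of its page, and counts pages inclusively. Below is a minimal userspace sketch of just that arithmetic, assuming 4 kB pages (PAGE_SHIFT = 12); the constants and example addresses are illustrative only, and only the start/end/count expressions are taken from the code above.

/* Standalone demo of flush_range()'s page rounding (not kernel code). */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 kB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x10001234;	/* arbitrary unaligned range */
	unsigned long end   = 0x10003456;

	start &= PAGE_MASK;			/* round down: 0x10001000 */
	end = (end - 1) | ~PAGE_MASK;		/* round up to the last byte
						   of end's page: 0x10003fff */

	/* inclusive page count, as computed inside flush_range() */
	unsigned long count = ((end - start) >> PAGE_SHIFT) + 1;

	printf("start=%#lx end=%#lx pages=%lu\n", start, end, count);
	return 0;	/* prints: start=0x10001000 end=0x10003fff pages=3 */
}

The "(end - 1) | ~PAGE_MASK" form avoids over-counting when end is already page-aligned: the range covers three pages here (0x10001000, 0x10002000, 0x10003000), which matches the computed count.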
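The empty definition of FINISH_FLUSH uses the classic "do { } while (0)" idiom rather than expanding to nothing. A minimal sketch of why, with illustrative identifiers that are not from the file above:

/* Demo of the do { } while (0) no-op macro idiom (not kernel code). */
#include <stdio.h>

/* Expanding to "do { } while (0)" keeps FINISH_FLUSH a single C
 * statement, so "FINISH_FLUSH;" parses like an ordinary function
 * call wherever a statement is expected. */
#define FINISH_FLUSH	do { } while (0)

int main(void)
{
	int hash_present = 0;	/* hypothetical stand-in condition */

	if (hash_present)
		FINISH_FLUSH;	/* an empty or "{}" expansion would leave a
				   stray ";" here and orphan the else below */
	else
		puts("no hash table: whole-TLB flush path taken");
	return 0;
}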