Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/asm-generic/tlb.h at v2.6.15-rc2 (149 lines, 3.9 kB)
/* asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #ifdef ARCH_FREE_PTE_NR
    #define FREE_PTE_NR ARCH_FREE_PTE_NR
  #else
    #define FREE_PTE_NR 506
  #endif
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR 1
  #define tlb_fast_mode(tlb) 1
#endif

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;             /* set to ~0U means fast mode */
        unsigned int            need_flush;     /* Really unmapped some ptes? */
        unsigned int            fullmm;         /* non-zero means full mm flush */
        struct page *           pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/* tlb_gather_mmu
 *      Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;

        /* Use fast mode if only one CPU is online */
        tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

        tlb->fullmm = full_mm_flush;

        return tlb;
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (!tlb->need_flush)
                return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
        if (!tlb_fast_mode(tlb)) {
                free_pages_and_swap_cache(tlb->pages, tlb->nr);
                tlb->nr = 0;
        }
}

/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

/* tlb_remove_page
 *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *      while handling the additional races in SMP caused by other CPUs
 *      caching valid mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
                return;
        }
        tlb->pages[tlb->nr++] = page;
        if (tlb->nr >= FREE_PTE_NR)
                tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

#define pte_free_tlb(tlb, ptep)                 \
        do {                                    \
                tlb->need_flush = 1;            \
                __pte_free_tlb(tlb, ptep);      \
        } while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)                 \
        do {                                    \
                tlb->need_flush = 1;            \
                __pud_free_tlb(tlb, pudp);      \
        } while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)                 \
        do {                                    \
                tlb->need_flush = 1;            \
                __pmd_free_tlb(tlb, pmdp);      \
        } while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */
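
For context, here is a minimal caller-side sketch of how the core mm code of this era drives the shootdown API when tearing down a range of user mappings, modeled loosely on the unmap path in mm/memory.c. It is illustrative only: example_unmap_range() is a hypothetical name, not a kernel function, and the page-table walk is elided.

/* Illustrative sketch, not part of asm-generic/tlb.h: a hypothetical
 * caller showing the gather/remove/finish lifecycle. */
static void example_unmap_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        /* Pick up the per-CPU gather state; 0 = not a full-mm teardown. */
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        /*
         * Page-table walk elided. For each pte actually cleared in
         * [start, end), the walker would call:
         *
         *      tlb_remove_tlb_entry(tlb, ptep, addr);  (note the invalidate)
         *      tlb_remove_page(tlb, page);             (queue page freeing)
         *
         * tlb_remove_page() batches up to FREE_PTE_NR pages and calls
         * tlb_flush_mmu() itself once the array fills.
         */

        /* Flush the TLB, free the queued pages, drop the per-CPU state. */
        tlb_finish_mmu(tlb, start, end);
}

The batching exists because, on SMP, a page cannot be freed before the flush: another CPU may still have a valid translation to it cached in its TLB. Fast mode (a single online CPU) has no such race, so tlb_remove_page() frees each page immediately.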