Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[POWERPC] Merge asm-ppc/mmu_context.h into asm-powerpc/mmu_context.h

Just did a direct merge from asm-ppc into asm-powerpc. This is the last
header that we directly include from asm-ppc.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>

+198 -4
include/asm-powerpc/mmu_context.h
@@ -2,16 +2,210 @@
 #define __ASM_POWERPC_MMU_CONTEXT_H
 #ifdef __KERNEL__
 
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm-generic/mm_hooks.h>
+
 #ifndef CONFIG_PPC64
-#include <asm-ppc/mmu_context.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ *  -- paulus.
+ */
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+				 & 0xffffff)
+
+/*
+   The MPC8xx has only 16 contexts. We rotate through them on each
+   task switch. A better way would be to keep track of tasks that
+   own contexts, and implement an LRU usage. That way very active
+   tasks don't always have to pay the TLB reload overhead. The
+   kernel pages are mapped shared, so the kernel can run on behalf
+   of any task that makes a kernel entry. Shared does not mean they
+   are not protected, just that the ASID comparison is not performed.
+	-- Dan
+
+   The IBM4xx has 256 contexts, so we can just rotate through these
+   as a way of "switching" contexts. If the TID of the TLB is zero,
+   the PID/TID comparison is disabled, so we can use a TID of zero
+   to represent all kernel pages as shared among all contexts.
+	-- Dan
+ */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#ifdef CONFIG_8xx
+#define NO_CONTEXT	16
+#define LAST_CONTEXT	15
+#define FIRST_CONTEXT	0
+
+#elif defined(CONFIG_4xx)
+#define NO_CONTEXT	256
+#define LAST_CONTEXT	255
+#define FIRST_CONTEXT	1
+
+#elif defined(CONFIG_E200) || defined(CONFIG_E500)
+#define NO_CONTEXT	256
+#define LAST_CONTEXT	255
+#define FIRST_CONTEXT	1
+
+#else
+
+/* PPC 6xx, 7xx CPUs */
+#define NO_CONTEXT	((unsigned long) -1)
+#define LAST_CONTEXT	32767
+#define FIRST_CONTEXT	1
+#endif
+
+/*
+ * Set the current MMU context.
+ * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
+ * loading up the segment registers for the user part of the address space.
+ *
+ * Since the PGD is immediately available, it is much faster to simply
+ * pass this along as a second parameter, which is required for 8xx and
+ * can be used for debugging on all processors (if you happen to have
+ * an Abatron).
+ */
+extern void set_context(unsigned long contextid, pgd_t *pgd);
+
+/*
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
+ */
+extern unsigned long context_map[];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only, we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern unsigned long next_mmu_context;
+
+/*
+ * If we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+#if LAST_CONTEXT < 30000
+#define FEW_CONTEXTS	1
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+#endif
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	unsigned long ctx;
+
+	if (mm->context.id != NO_CONTEXT)
+		return;
+#ifdef FEW_CONTEXTS
+	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+		steal_context();
+#endif
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context.id = ctx;
+#ifdef FEW_CONTEXTS
+	context_mm[ctx] = mm;
+#endif
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+	mm->context.id = NO_CONTEXT;
+	mm->context.vdso_base = 0;
+	return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	preempt_disable();
+	if (mm->context.id != NO_CONTEXT) {
+		clear_bit(mm->context.id, context_map);
+		mm->context.id = NO_CONTEXT;
+#ifdef FEW_CONTEXTS
+		atomic_inc(&nr_free_contexts);
+#endif
+	}
+	preempt_enable();
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile ("dssall;\n"
+#ifndef CONFIG_POWER4
+			"sync;\n" /* G4 needs a sync here, G5 apparently not */
+#endif
+			: : );
+#endif /* CONFIG_ALTIVEC */
+
+	tsk->thread.pgdir = next->pgd;
+
+	/* No need to flush userspace segments if the mm doesnt change */
+	if (prev == next)
+		return;
+
+	/* Setup new userspace context */
+	get_mmu_context(next);
+	set_context(next->context.id, next->pgd);
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+#define activate_mm(active_mm, mm)	switch_mm(active_mm, mm, current)
+
+extern void mmu_context_init(void);
+
+
 #else
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/mmu.h>
-#include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 
 /*
  * Copyright (C) 2001 PPC 64 Team, IBM Corp
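For readers who want to experiment with the 32-bit context-allocation scheme outside the kernel, below is a minimal userspace sketch of the two pieces the merged header documents: the CTX_TO_VSID skew and the bitmap allocator in get_mmu_context(). It is illustrative only, not kernel code: struct mm_stub and the *_stub bit helpers are hypothetical, single-threaded stand-ins for the kernel's mm_struct and atomic bitops, and the FEW_CONTEXTS stealing path is omitted; only the CTX_TO_VSID macro and the shape of the allocation loop come from the header above.

/*
 * ctx_sketch.c - hypothetical userspace model of the 32-bit context
 * allocator. Single-threaded stand-ins replace the kernel's atomic
 * bitops; struct mm_stub replaces mm_struct. Illustration only.
 */
#include <stdio.h>

#define LAST_CONTEXT	32767	/* 6xx/7xx values from the header */
#define FIRST_CONTEXT	1
#define NO_CONTEXT	((unsigned long) -1)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Same skew as the header: spreads hash-table entries out */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

static unsigned long context_map[(LAST_CONTEXT + 1) / BITS_PER_LONG];
static unsigned long next_mmu_context = FIRST_CONTEXT;

struct mm_stub {
	unsigned long id;	/* stands in for mm->context.id */
};

/* Non-atomic stand-in for the kernel's test_and_set_bit() */
static int test_and_set_bit_stub(unsigned long nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	int old = (map[nr / BITS_PER_LONG] & mask) != 0;

	map[nr / BITS_PER_LONG] |= mask;
	return old;
}

/* Non-atomic stand-in for find_next_zero_bit(): scan upward from 'start' */
static unsigned long find_next_zero_bit_stub(const unsigned long *map,
					     unsigned long size,
					     unsigned long start)
{
	unsigned long nr;

	for (nr = start; nr < size; nr++)
		if (!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))))
			return nr;
	return size;	/* caller wraps to 0, as get_mmu_context() does */
}

/* Mirrors the allocation loop of get_mmu_context() (stealing omitted) */
static void get_mmu_context_stub(struct mm_stub *mm)
{
	unsigned long ctx;

	if (mm->id != NO_CONTEXT)
		return;
	ctx = next_mmu_context;
	while (test_and_set_bit_stub(ctx, context_map)) {
		ctx = find_next_zero_bit_stub(context_map, LAST_CONTEXT + 1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->id = ctx;
}

int main(void)
{
	struct mm_stub a = { NO_CONTEXT }, b = { NO_CONTEXT };
	unsigned long va;

	get_mmu_context_stub(&a);
	get_mmu_context_stub(&b);
	printf("context a=%lu b=%lu\n", a.id, b.id);

	/* One VSID per 256 MB segment (top 4 bits of the address) */
	for (va = 0; va < 0x40000000UL; va += 0x10000000UL)
		printf("ctx %lu, va 0x%08lx -> VSID 0x%06lx\n",
		       a.id, va, CTX_TO_VSID(a.id, va));
	return 0;
}

Run with the 6xx/7xx parameters (LAST_CONTEXT = 32767, FIRST_CONTEXT = 1), the two stub mms receive contexts 1 and 2, and the four 256 MB segments of context 1 map to VSIDs 0x003810, 0x003921, 0x003a32, and 0x003b43, showing how the 897 * 16 skew spreads adjacent contexts and segments across the MMU hash table while the bitmap keeps allocation O(1) in the common case where next_mmu_context already points at a free bit.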