Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.30, 328 lines, 10 kB
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)   \
        ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}
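/*
 * Illustrative sketch, not part of the control flow above: for a 256M
 * kernel segment the two halves of an SLB entry built by
 * mk_esid_data()/mk_vsid_data() decompose as
 *
 *      esid = (ea & ESID_MASK) | SLB_ESID_V | slot;
 *      vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT)
 *                      | flags
 *                      | (MMU_SEGSIZE_256M << SLB_VSID_SSIZE_SHIFT);
 *
 * so the ESID word carries the effective segment number, the valid bit
 * and the entry index consumed by slbmte, while the VSID word carries
 * the virtual segment id, the LLP/protection flags and the segment-size
 * encoding.
 */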
void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                         ((addr2 >> SID_SHIFT_1T) != 0));

        /* both addresses are < 1T */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other > 1T.  Not a match */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are > 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
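/*
 * Worked example for esids_match(), using the usual hash-MMU shift
 * values (SID_SHIFT = 28 for 256M segments, SID_SHIFT_1T = 40, both
 * defined outside this file): 0x10000000 and 0x10000abc lie in the
 * same 256M segment, so GET_ESID() yields 1 for both and they match;
 * 0x10000000 and 0x20000000 give ESIDs 1 and 2, no match; and an
 * address below 1T can never match one above 1T (case 3 above),
 * regardless of how the remaining bits compare.
 */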
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C is set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (esids_match(pc, stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if (esids_match(pc, unmapped_base) || esids_match(stack, unmapped_base))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr,
                           4 + (unsigned long)insn_addr);
}
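/*
 * What patch_slb_encoding() relies on (implied by the comment in its
 * body rather than visible in this file): each patched target in the
 * SLB miss handler is an instruction whose low 16 bits are a zero
 * immediate, e.g. an "li rN,0".  OR-ing the new value into the
 * instruction word therefore installs the encoding in place, and
 * flush_icache_range() over those 4 bytes makes the rewritten
 * instruction visible to instruction fetch before the handler next
 * runs.
 */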
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        extern unsigned int *slb_miss_kernel_load_vmemmap;
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                DBG("SLB: linear  LLP = %04lx\n", linear_llp);
                DBG("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(2);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, 2);

        asm volatile("isync":::"memory");
}
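/*
 * Net effect of slb_initialize() on a non-iSeries cpu, restated for
 * reference: bolted slot 0 maps the linear mapping at PAGE_OFFSET,
 * slot 1 maps VMALLOC_START, and slot 2 holds the kernel stack on
 * secondary cpus.  This is exactly the layout slb_flush_and_rebolt()
 * rebuilds, which is why both sites must agree with SLB_NUM_BOLTED.
 */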