Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
PowerPC64 SLB support, at tag v4.8
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
        LINEAR_INDEX    = 0, /* Kernel linear map  (0xc000000000000000) */
        VMALLOC_INDEX   = 1, /* Kernel virtual map (0xd000000000000000) */
        KSTACK_INDEX    = 2, /* Kernel stack map */
};

extern void slb_allocate_realmode(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         enum slb_index index)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     enum slb_index index)
{
        struct slb_shadow *p = get_slb_shadow();

        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        p->save_area[index].esid = 0;
        p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
        p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
}

static inline void slb_shadow_clear(enum slb_index index)
{
        get_slb_shadow()->save_area[index].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        enum slb_index index)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, index);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, index))
                     : "memory" );
}
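
/*
 * Worked example of the encoding above (illustrative only; the exact bit
 * positions come from the SLB_ESID_V, SLB_VSID_ and ESID_MASK constants):
 * bolting the kernel linear mapping with
 *
 *   create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX)
 *
 * expands to the two words
 *
 *   vsid word = (get_kernel_vsid(PAGE_OFFSET, ssize) << slb_vsid_shift(ssize))
 *               | SLB_VSID_KERNEL | linear_llp
 *               | ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT)
 *   esid word = (0xc000000000000000 & slb_esid_mask(ssize)) | SLB_ESID_V | 0
 *
 * so the esid word carries the effective segment, the valid bit and the
 * slot number used by slbmte, while the vsid word carries the virtual
 * segment id, the kernel protection/page-size (LLP) flags and the
 * segment-size encoding.
 */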

static void __slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * and PR KVM appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(KSTACK_INDEX);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
                ksp_vsid_data =
                        be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

void slb_flush_and_rebolt(void)
{

        WARN_ON(!irqs_disabled());

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        __slb_flush_and_rebolt();
        get_paca()->slb_cache_ptr = 0;
}

void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                         ((addr2 >> SID_SHIFT_1T) != 0));

        /* both addresses are < 1T */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other > 1T.  Not a match */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are > 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
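
/*
 * For example, with 256MB segments (SID_SHIFT of 28):
 *
 *   esids_match(0x10000000, 0x10ffffff) == 1    (both have ESID 0x1)
 *   esids_match(0x10000000, 0x20000000) == 0    (ESID 0x1 vs ESID 0x2)
 *
 * and on a 1T-capable machine an address below 1T never matches one at or
 * above 1T (case 3 above), regardless of the low-order bits.
 */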

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long exec_base;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
        if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                __slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        copy_mm_to_paca(&mm->context);

        /*
         * Preload some userspace segments into the SLB.
         * Almost all 32 and 64-bit PowerPC executables are linked at
         * 0x10000000 so it makes sense to preload this segment.
         */
        exec_base = 0x10000000;

        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
            is_kernel_addr(exec_base))
                return;

        slb_allocate(pc);

        if (!esids_match(pc, stack))
                slb_allocate(stack);

        if (!esids_match(pc, exec_base) &&
            !esids_match(stack, exec_base))
                slb_allocate(exec_base);
}
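
/*
 * Illustration of the preload heuristic above: for a typical 64-bit task
 * with, say, pc = 0x10002f00 and exec_base = 0x10000000, both addresses
 * fall in the same 256MB segment (ESID 0x1), so a single slb_allocate()
 * covers them and only the stack (usually in a different segment) needs a
 * second preload.  If pc or stack points into the kernel, no user segments
 * are preloaded at all.
 */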

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /*
         * This function patches either an li or a cmpldi instruction with
         * a new immediate value. This relies on the fact that both li
         * (which is actually addi) and cmpldi take a 16-bit immediate
         * value, and it is situated in the same location in the instruction,
         * i.e. bits 16-31 (big-endian bit order), or the lower 16 bits.
         * The signedness of the immediate operand differs between the two
         * instructions; however, this code only ever patches a small value,
         * much less than 1 << 15, so we can get away with it.
         * To patch the value we read the existing instruction, clear the
         * immediate value, or in our new value, then write the instruction
         * back.
         */
        unsigned int insn = (*insn_addr & 0xffff0000) | immed;
        patch_instruction(insn_addr, insn);
}

extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

void slb_set_size(u16 size)
{
        if (mmu_slb_size == size)
                return;

        mmu_slb_size = size;
        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even entry 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(KSTACK_INDEX);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, KSTACK_INDEX);

        asm volatile("isync":::"memory");
}
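
/*
 * Net effect of slb_initialize(): the SLB is left holding only the bolted
 * entries, slot 0 for the kernel linear region at PAGE_OFFSET, slot 1 for
 * the vmalloc region at VMALLOC_START, and (on secondary CPUs) slot 2 for
 * the kernel stack.  stab_rr starts at SLB_NUM_BOLTED, so the round-robin
 * replacement in the SLB miss handler (bounded by the patched
 * slb_compare_rr_to_size value) begins past the bolted slots.
 */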