Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.19, 254 lines, 7.2 kB
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        barrier();
        get_slb_shadow()->save_area[entry].vsid = vsid;
        barrier();
        get_slb_shadow()->save_area[entry].esid = esid;
}

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
                          entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}

void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* Only third entry (stack) may change here so only resave that */
        slb_shadow_update(ksp_esid_data,
                          mk_vsid_data(ksp_esid_data, lflags), 2);

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}

void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
        extern unsigned int *slb_miss_user_load_huge;
        unsigned long huge_llp;

        huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);

                DBG("SLB: linear LLP = %04x\n", linear_llp);
                DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
                patch_slb_encoding(slb_miss_user_load_huge,
                                   SLB_VSID_USER | huge_llp);
                DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
        }

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
        {
                unsigned long lflags, vflags;

                lflags = SLB_VSID_KERNEL | linear_llp;
                vflags = SLB_VSID_KERNEL | vmalloc_llp;

                /* Invalidate the entire SLB (even slot 0) & all the ERATS */
                asm volatile("isync":::"memory");
                asm volatile("slbmte %0,%0"::"r" (0) : "memory");
                asm volatile("isync; slbia; isync":::"memory");
                create_shadowed_slbe(PAGE_OFFSET, lflags, 0);

                create_shadowed_slbe(VMALLOC_START, vflags, 1);

                /* We don't bolt the stack for the time being - we're in boot,
                 * so the stack is in the bolted segment. By the time it goes
                 * elsewhere, we'll call _switch() which will bolt in the new
                 * one. */
                asm volatile("isync":::"memory");
        }
#endif /* CONFIG_PPC_ISERIES */

        get_paca()->stab_rr = SLB_NUM_BOLTED;
}
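
The two helpers mk_esid_data() and mk_vsid_data() are where an SLB entry's bit layout is actually assembled, so a stand-alone illustration may help. The sketch below packs the same fields in user space. It is illustrative only: the constants follow the 2.6-era <asm/mmu.h> definitions for 256MB segments as best they can be reconstructed here, so treat them as assumptions and check the real headers, and the VSID is passed in directly rather than derived with get_kernel_vsid() as the kernel does.

/* slb_pack_sketch.c - illustrative only, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT       28                      /* 256MB segments */
#define ESID_MASK       0xfffffffff0000000UL    /* top 36 bits of the EA (assumed) */
#define SLB_ESID_V      0x0000000008000000UL    /* entry-valid bit (assumed) */
#define SLB_VSID_SHIFT  12

/* Mirrors mk_esid_data(): segment id, valid bit, bolted slot index. */
static uint64_t pack_esid(uint64_t ea, uint64_t slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

/* Mirrors mk_vsid_data(), except the VSID is a plain argument here. */
static uint64_t pack_vsid(uint64_t vsid, uint64_t flags)
{
        return (vsid << SLB_VSID_SHIFT) | flags;
}

int main(void)
{
        uint64_t ea = 0xc000000001234567UL;     /* arbitrary kernel EA */
        uint64_t vsid = 0x123456UL;             /* made-up VSID */
        uint64_t flags = 0x400UL;               /* stand-in for SLB_VSID_KERNEL */

        printf("esid word: %016llx\n", (unsigned long long)pack_esid(ea, 2));
        printf("vsid word: %016llx\n", (unsigned long long)pack_vsid(vsid, flags));
        return 0;
}

Feeding it the same stack slot number (2) that slb_flush_and_rebolt() uses shows how the slot index lands in the low bits of the ESID word, which is why a bolted entry can be rewritten in place with slbmte.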
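Likewise, patch_slb_encoding() depends on the SLB miss handler being assembled with an instruction whose immediate field is zero, so the real LLP/protection bits can simply be OR'd in once they are known at boot. The toy below applies the same OR-patching to an instruction word held in a local variable instead of live kernel text; the ori encoding shown is genuine PowerPC, but the patched-in value is made up, and a real patch must be followed by an icache flush, as the kernel does with flush_icache_range().

/* patch_sketch.c - illustrative only, not kernel code */
#include <stdio.h>
#include <stdint.h>

/* Same trick as patch_slb_encoding(): assumes the immediate field is 0. */
static void patch_encoding(uint32_t *insn, uint32_t immed)
{
        *insn |= immed;
        /* in the kernel, flush_icache_range() would follow here */
}

int main(void)
{
        uint32_t insn = 0x614a0000;     /* ori r10,r10,0 */

        patch_encoding(&insn, 0x0490);  /* made-up stand-in for SLB_VSID_KERNEL | llp */
        printf("patched insn: %08x\n", insn);   /* now ori r10,r10,0x490 */
        return 0;
}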