Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/powerpc/mm/slb.c at v2.6.18 (225 lines, 6.4 kB)
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

static inline void create_slbe(unsigned long ea, unsigned long flags,
			       unsigned long entry)
{
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
		ksp_esid_data &= ~SLB_ESID_V;

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte %0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte %2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
			"r"(mk_esid_data(VMALLOC_START, 1)),
			"r"(mk_vsid_data(ksp_esid_data, lflags)),
			"r"(ksp_esid_data)
		     : "memory");
}

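/*
 * The SLB miss handler records, in paca->slb_cache, the user ESIDs it
 * has inserted since the last context switch.  As long as that cache
 * has not overflowed, switch_slb() below can slbie just those entries
 * instead of flushing and re-bolting the entire SLB.
 */
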
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

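/*
 * Called once on each CPU during early boot.  The first caller also
 * patches the page-size encodings into the SLB miss handler through
 * patch_slb_encoding() above.
 */
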
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
	extern unsigned int *slb_miss_user_load_huge;
	unsigned long huge_llp;

	huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);

		DBG("SLB: linear LLP = %04x\n", linear_llp);
		DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
		patch_slb_encoding(slb_miss_user_load_huge,
				   SLB_VSID_USER | huge_llp);
		DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
	}

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
 {
	unsigned long lflags, vflags;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_slbe(PAGE_OFFSET, lflags, 0);

	create_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
 }
#endif /* CONFIG_PPC_ISERIES */

	get_paca()->stab_rr = SLB_NUM_BOLTED;
}
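
The two operands that create_slbe() hands to the slbmte instruction are plain bit-packed doublewords. The standalone userspace sketch below (not part of the file above) assembles them the same way mk_esid_data() and mk_vsid_data() do; the constant values are copied from this era's asm/mmu.h but should be treated as illustrative assumptions, and the EA, VSID, slot, and flags are made up for the example.

#include <stdio.h>

/* Constants as in asm/mmu.h of this era (assumptions; verify there). */
#define SID_SHIFT	28			/* 256MB segments */
#define ESID_MASK	0xfffffffff0000000UL	/* EA bits above the segment offset */
#define SLB_ESID_V	0x0000000008000000UL	/* valid bit in the esid word */
#define SLB_VSID_SHIFT	12

int main(void)
{
	unsigned long ea    = 0xc000000001234000UL; /* made-up kernel EA */
	unsigned long vsid  = 0x0000123456789UL;    /* made-up VSID */
	unsigned long slot  = 2;                    /* e.g. the bolted stack slot */
	unsigned long flags = 0;                    /* SLB_VSID_KERNEL | llp in the kernel */

	/* Same packing as mk_esid_data()/mk_vsid_data() above. */
	unsigned long esid_data = (ea & ESID_MASK) | SLB_ESID_V | slot;
	unsigned long vsid_data = (vsid << SLB_VSID_SHIFT) | flags;

	printf("slbmte operands: vsid=%016lx esid=%016lx\n", vsid_data, esid_data);
	return 0;
}

Note that the valid bit and the slot number both live below SID_SHIFT. That is why slb_flush_and_rebolt() can pass ksp_esid_data straight back into mk_vsid_data(): get_kernel_vsid() derives the VSID only from the bits above the segment offset, so the low packing bits are ignored.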