Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.13, 346 lines, 10 kB
/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V		0x80
#define STE_ESID_KS		0x20
#define STE_ESID_KP		0x10
#define STE_ESID_N		0x08

#define STE_VSID_SHIFT		12

/* Location of cpu0's segment table */
#define STAB0_PAGE		0x9
#define STAB0_PHYS_ADDR		(STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR		(KERNELBASE+STAB0_PHYS_ADDR)

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100) /* largepage */
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */

#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)

/*
 * Hash table
 */

#define HPTES_PER_GROUP		8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	unsigned long vsid;
	unsigned long page;

	if (large) {
		vsid = vpn >> 4;
		page = vpn & 0xf;
	} else {
		vsid = vpn >> 16;
		page = vpn & 0xffff;
	}

	return (vsid & 0x7fffffffffUL) ^ page;
}
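/*
 * Illustrative worked example (not part of the original file, values
 * chosen arbitrarily): for a 4K page with vpn = 0x123456789,
 * hpt_hash(vpn, 0) splits it into vsid = 0x12345 and page = 0x6789,
 * giving (0x12345 & 0x7fffffffffUL) ^ 0x6789 = 0x144cc as the
 * primary hash; the PTE group index is that value masked with
 * htab_hash_mask.
 */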
static inline void __tlbie(unsigned long va, int large)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	if (large) {
		va &= HPAGE_MASK;
		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
	} else {
		va &= PAGE_MASK;
		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
	}
}

static inline void tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");
	__tlbie(va, large);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel(unsigned long va)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);
	va &= PAGE_MASK;

	/*
	 * Thanks to Alan Modra we are now able to use machine specific
	 * assembly instructions (like tlbiel) by using the gas -many flag.
	 * However we have to support older toolchains so for the moment
	 * we hardwire it.
	 */
#if 0
	asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}

static inline void tlbiel(unsigned long va)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel(va);
	asm volatile("ptesync": : :"memory");
}

static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
{
	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
	unsigned long va;

	va = avpn << 23;

	if (! (hpte_v & HPTE_V_LARGE)) {
		unsigned long vpi, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;

		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;

		va |= vpi << PAGE_SHIFT;
	}

	return va;
}
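/*
 * Note (illustrative, not part of the original file): slot2va()
 * inverts the primary hash above.  Since the group index is
 * pteg = (vsid ^ page) & htab_hash_mask, xoring pteg back against the
 * VSID bits (va >> 28) recovers the low page-index bits that the
 * abbreviated AVPN does not store; a slot in a secondary group is
 * first mapped back to its primary group via ~pteg.
 */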
/*
 * Handle a fault by adding an HPTE. If the address can't be determined
 * to be valid via Linux page tables, return 1. If handled return 0
 */
extern int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, pte_t *ptep, unsigned long trap,
		       int local);

extern void htab_finish_init(void);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long vflags,
				     unsigned long rflags);
extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn,
			       unsigned long vflags, unsigned long rflags);

extern void stabs_alloc(void);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 15) | (esid & 0x7fff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139 = 0xBF6E61B
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 15 significant bits of ESID and 20 bits of
 * context for user addresses.  i.e. 8T (43 bits) of address space for
 * up to 1M contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	20
#define USER_ESID_BITS	15

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *		bits may contain other garbage, so you may need to mask the
 *		result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)					\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
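/*
 * Worked example of the 2^n-1 folding trick (not part of the original
 * file; uses a small n = 4, i.e. modulus 15, for readability): take
 * x = 55 = 0x37.  Then (x >> 4) + (x & 0xf) = 3 + 7 = 10 = 55 mod 15.
 * The extra fold via ((x+1) >> n) handles the boundary case, e.g.
 * x = 30: the first fold gives 1 + 14 = 15, and (15 + (16 >> 4)) & 0xf
 * = 0 = 30 mod 15.
 */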
#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	pgd_t *huge_pgdir;
	u16 htlb_segs; /* bitmask */
#endif
} mm_context_t;


static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^41) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}

#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */
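The sketch below is an editor's addition, not part of the kernel source: a minimal user-space program that cross-checks the divide-free fold in vsid_scramble() against the direct modulo. The file name and test stride are arbitrary; ASM_CONST is a stand-in for the kernel macro, and a 64-bit unsigned long is assumed, as the ppc64 code itself does.

/* vsid_test.c - standalone check of the VSID scramble fold.
 * Build: cc -O2 -o vsid_test vsid_test.c
 * Assumes 64-bit unsigned long (LP64), as on ppc64.
 */
#include <stdio.h>
#include <stdlib.h>

#define ASM_CONST(x)	x##UL	/* stand-in for the kernel's ASM_CONST */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL << VSID_BITS) - 1)

/* Same folding trick as vsid_scramble() in the header above:
 * reduce mod 2^36-1 by adding the high and low 36-bit halves,
 * then fold once more for the boundary case. */
static unsigned long vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
	unsigned long p;

	/* Compare against the straightforward divide-based form for a
	 * spread of proto-VSIDs (arbitrary odd stride). */
	for (p = 0; p < VSID_MODULUS; p += 0x12345671UL) {
		unsigned long want = (p * VSID_MULTIPLIER) % VSID_MODULUS;
		unsigned long got = vsid_scramble(p);

		if (got != want) {
			printf("mismatch at %#lx: %#lx != %#lx\n",
			       p, got, want);
			return EXIT_FAILURE;
		}
	}
	printf("scramble matches direct modulo\n");
	return EXIT_SUCCESS;
}

The product p * VSID_MULTIPLIER never overflows here: a proto-VSID is below 2^36 and the multiplier below 2^28, so the result fits in 64 bits, which is what makes the two-fold reduction exact.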