Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.15-rc1 · 395 lines · 12 kB
/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};

#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index into the page sizes array.
 * Using fixed constants for this purpose is better for the performance
 * of the low-level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6
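
/*
 * For illustration, a hypothetical boot-time default table (the real
 * one is filled in by the platform setup code, e.g.
 * htab_init_page_sizes(), based on what the hardware supports) might
 * look like this; the values shown are indicative only:
 *
 *	static struct mmu_psize_def mmu_psize_defaults[] = {
 *		[MMU_PAGE_4K] = {
 *			.shift	= 12,		// 4K base pages
 *			.sllp	= 0,		// no L/LP bits in the SLB
 *			.penc	= 0,
 *			.avpnm	= 0,
 *			.tlbiel	= 0,
 *		},
 *		[MMU_PAGE_16M] = {
 *			.shift	= 24,		// 16M large pages
 *			.sllp	= SLB_VSID_L,	// L=1, LP=00
 *			.penc	= 0,
 *			.avpnm	= 0x1UL,	// AVPN bit overlapping the 16M offset
 *			.tlbiel	= 0,
 *		},
 *	};
 */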

#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
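
/*
 * Sketch of how the two helpers above are combined when a hash PTE
 * is assembled (the variables and flags here are hypothetical; the
 * real insert paths live in the platform code, e.g.
 * hash_native_64.c and the LPAR/iSeries equivalents):
 *
 *	hpte_t hpte;
 *
 *	hpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
 *	hpte.r = hpte_encode_r(pa, psize) | rflags;
 */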

/*
 * This hashes a virtual address for a 256MB segment only, for now
 */

static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local);

extern void htab_finish_init(void);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);

extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void mm_init_ppc64(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long rflags,
				     unsigned long vflags, int psize);

extern long native_hpte_insert(unsigned long hpte_group,
			       unsigned long va, unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags, int psize);

extern long iSeries_hpte_insert(unsigned long hpte_group,
				unsigned long va, unsigned long prpn,
				unsigned long rflags,
				unsigned long vflags, int psize);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void stab_initialize(unsigned long stab);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 16) | (esid & 0xffff)
 * (matching USER_ESID_BITS below).
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER is a 28-bit prime (see below)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved. VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses, i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results). The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
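
/*
 * Worked example of the proto-VSID construction above (illustrative
 * numbers only): for a user effective address ea = 0x10000000 in
 * context 7, with 256MB segments (SID_SHIFT = 28):
 *
 *	esid       = ea >> SID_SHIFT           = 0x1
 *	proto-VSID = (7 << USER_ESID_BITS) | 1 = 0x70001
 *
 * which is then fed through the VSID_MULTIPLIER/VSID_MODULUS
 * scramble (vsid_scramble() below) to produce the real VSID.
 */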

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx


#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;


static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (i.e. below USER_VSID_RANGE) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}
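
/*
 * Example use of the helpers above (a sketch only; ea and the
 * surrounding code are hypothetical). The SLB setup code builds the
 * "vsid" operand for slbmte roughly like this:
 *
 *	unsigned long vsid_data =
 *		(get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
 *
 * User segments use get_vsid(context, ea) and SLB_VSID_USER instead.
 */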

#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */