Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.17-rc4 (591 lines, 20 kB)

#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
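/*
 * Editor's note: the sketch below is illustrative and not part of the
 * original header. It shows the usual match test built from the
 * HPTE_V_* bits above: HPTE_V_COMPARE() ignores the low seven flag
 * bits of the first dword, so a search key only needs the AVPN and
 * segment-size fields, plus a check that the entry is valid. The
 * helper name is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline int example_hpte_match(unsigned long hpte_v,
				     unsigned long want_v)
{
	/* entry must be valid and agree with the key above bit 6 */
	return (hpte_v & HPTE_V_VALID) && HPTE_V_COMPARE(hpte_v, want_v);
}
#endif /* __ASSEMBLY__ */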
/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */

#ifndef __ASSEMBLY__

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field:
 *
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1
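/*
 * Editor's note: illustrative sketch, not in the original header. It
 * shows how a PP_* value from the table above folds into the second
 * HPTE dword: the low two bits land in HPTE_R_PP and the third bit
 * (used only by PP_RXXX) is HPTE_R_PP0. The helper name is
 * hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_hpte_r_set_pp(unsigned long hpte_r,
						  unsigned long pp)
{
	/* clear the old permission bits, then install the new ones */
	hpte_r &= ~(HPTE_R_PP | HPTE_R_PP0);
	return hpte_r | (pp & (HPTE_R_PP | HPTE_R_PP0));
}
#endif /* __ASSEMBLY__ */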
/*
 * encode page number shift.
 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
 * 12 bits. This enables us to address up to 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of va and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring the lower 12 bits
 * ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78 bits VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN, and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
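/*
 * Editor's note: illustrative sketch, not in the original header. It
 * strings the encode helpers above together to build the two dwords
 * of a candidate HPTE. The function name, the PP_RWRX permission and
 * the WIMG choice (HPTE_R_M, memory coherence) are editorial
 * assumptions chosen only to make the example concrete.
 */
static inline void example_build_hpte(unsigned long vpn, unsigned long pa,
				      int psize, int ssize,
				      unsigned long *hpte_v,
				      unsigned long *hpte_r)
{
	/* first dword: AVPN, B (segment size), L, plus the valid bit */
	*hpte_v = hpte_encode_v(vpn, psize, psize, ssize) | HPTE_V_VALID;
	/* second dword: real page number, permissions and WIMG bits */
	*hpte_r = hpte_encode_r(pa, psize, psize) | PP_RWRX | HPTE_R_M;
}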
/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   int local, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, int local,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
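/*
 * Editor's note: illustrative sketch, not in the original header. It
 * shows how hpt_hash() is typically consumed: the hash is masked with
 * htab_hash_mask and multiplied by HPTES_PER_GROUP to find the slot
 * of the primary PTEG, while the secondary PTEG uses the complemented
 * hash. The helper names are hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_primary_slot(unsigned long vpn,
						 unsigned int shift, int ssize)
{
	return (hpt_hash(vpn, shift, ssize) & htab_hash_mask) *
		HPTES_PER_GROUP;
}

static inline unsigned long example_secondary_slot(unsigned long vpn,
						   unsigned int shift, int ssize)
{
	return (~hpt_hash(vpn, shift, ssize) & htab_hash_mask) *
		HPTES_PER_GROUP;
}
#endif /* __ASSEMBLY__ */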
/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * bad addresses. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)


#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
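/*
 * Editor's note (worked arithmetic, not in the original header): with
 * CONTEXT_BITS = 19 and ESID_BITS = 18, VSID_BITS_256M = 37, so
 * VSID_MODULUS_256M = 2^37 - 1. A proto-VSID is below 2^37 and
 * VSID_MULTIPLIER_256M = 12538073 is below 2^24, so the product in
 * the scramble stays below 2^61 and cannot overflow an unsigned
 * 64-bit multiply.
 */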
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;		/* mask of enabled coprocessor types */
	unsigned int cop_pid;		/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
} mm_context_t;

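/*
 * Editor's note: illustrative sketch, not in the original header. It
 * walks the 3-level sub-page protection tree described above for an
 * effective address: protptrs selects a page of pointers (each
 * covering 8TB), that selects a page of protection words (each
 * covering 1GB), and each 32-bit word covers one 64k page (two bits
 * per 4k sub-page). The helper name is hypothetical and error
 * handling is elided.
 */
#ifdef CONFIG_PPC_SUBPAGE_PROT
static inline unsigned int example_subpage_prot_word(
		struct subpage_prot_table *spt, unsigned long ea)
{
	unsigned int *sbpm, **sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpp = spt->low_prot;
	} else {
		sbpp = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpp)
			return 0;
	}
	sbpm = sbpp[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpm)
		return 0;
	return sbpm[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
#endif /* CONFIG_PPC_SUBPAGE_PROT */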
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
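Editor's note: the standalone program below is an illustrative sketch,
not part of the kernel file. It demonstrates why the shift-and-add
sequence in vsid_scramble() and ASM_VSID_SCRAMBLE computes a product
modulo 2^n - 1 without a divide: since 2^n ≡ 1 (mod 2^n - 1), the high
bits of the product can be folded back onto the low bits, with one
final fold for a possible carry. The function and file names are
hypothetical.

#include <assert.h>
#include <stdio.h>

/*
 * Fold x into n bits, computing x % (2^n - 1). As with the kernel
 * macro, this assumes (x >> n) < 2^n - 1, which holds for any
 * proto-VSID times the 24-bit multiplier.
 */
static unsigned long fold_mod_2n_minus_1(unsigned long x, unsigned int n)
{
	unsigned long mod = (1UL << n) - 1;

	x = (x >> n) + (x & mod);	   /* 2^n == 1 (mod 2^n - 1) */
	return (x + ((x + 1) >> n)) & mod; /* final carry fold */
}

int main(void)
{
	unsigned long p;

	/* spot-check the fold against the straightforward modulus */
	for (p = 0; p < (1UL << 20); p += 7) {
		unsigned long x = p * 12538073UL; /* VSID_MULTIPLIER_256M */
		assert(fold_mod_2n_minus_1(x, 37) == x % ((1UL << 37) - 1));
	}
	printf("fold matches %% (2^37 - 1)\n");
	return 0;
}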