Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.7-rc7 1287 lines 35 kB view raw
1/* 2 * PowerPC64 port by Mike Corrigan and Dave Engebretsen 3 * {mikejc|engebret}@us.ibm.com 4 * 5 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 6 * 7 * SMP scalability work: 8 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 9 * 10 * Module name: htab.c 11 * 12 * Description: 13 * PowerPC Hashed Page Table functions 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License 17 * as published by the Free Software Foundation; either version 18 * 2 of the License, or (at your option) any later version. 19 */ 20 21#undef DEBUG 22#undef DEBUG_LOW 23 24#include <linux/spinlock.h> 25#include <linux/errno.h> 26#include <linux/sched.h> 27#include <linux/proc_fs.h> 28#include <linux/stat.h> 29#include <linux/sysctl.h> 30#include <linux/export.h> 31#include <linux/ctype.h> 32#include <linux/cache.h> 33#include <linux/init.h> 34#include <linux/signal.h> 35#include <linux/memblock.h> 36 37#include <asm/processor.h> 38#include <asm/pgtable.h> 39#include <asm/mmu.h> 40#include <asm/mmu_context.h> 41#include <asm/page.h> 42#include <asm/types.h> 43#include <asm/uaccess.h> 44#include <asm/machdep.h> 45#include <asm/prom.h> 46#include <asm/tlbflush.h> 47#include <asm/io.h> 48#include <asm/eeh.h> 49#include <asm/tlb.h> 50#include <asm/cacheflush.h> 51#include <asm/cputable.h> 52#include <asm/sections.h> 53#include <asm/spu.h> 54#include <asm/udbg.h> 55#include <asm/code-patching.h> 56#include <asm/fadump.h> 57#include <asm/firmware.h> 58 59#ifdef DEBUG 60#define DBG(fmt...) udbg_printf(fmt) 61#else 62#define DBG(fmt...) 63#endif 64 65#ifdef DEBUG_LOW 66#define DBG_LOW(fmt...) udbg_printf(fmt) 67#else 68#define DBG_LOW(fmt...) 69#endif 70 71#define KB (1024) 72#define MB (1024*KB) 73#define GB (1024L*MB) 74 75/* 76 * Note: pte --> Linux PTE 77 * HPTE --> PowerPC Hashed Page Table Entry 78 * 79 * Execution context: 80 * htab_initialize is called with the MMU off (of course), but 81 * the kernel has been copied down to zero so it can directly 82 * reference global data. At this point it is very difficult 83 * to print debug info. 84 * 85 */ 86 87#ifdef CONFIG_U3_DART 88extern unsigned long dart_tablebase; 89#endif /* CONFIG_U3_DART */ 90 91static unsigned long _SDR1; 92struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; 93 94struct hash_pte *htab_address; 95unsigned long htab_size_bytes; 96unsigned long htab_hash_mask; 97EXPORT_SYMBOL_GPL(htab_hash_mask); 98int mmu_linear_psize = MMU_PAGE_4K; 99int mmu_virtual_psize = MMU_PAGE_4K; 100int mmu_vmalloc_psize = MMU_PAGE_4K; 101#ifdef CONFIG_SPARSEMEM_VMEMMAP 102int mmu_vmemmap_psize = MMU_PAGE_4K; 103#endif 104int mmu_io_psize = MMU_PAGE_4K; 105int mmu_kernel_ssize = MMU_SEGSIZE_256M; 106int mmu_highuser_ssize = MMU_SEGSIZE_256M; 107u16 mmu_slb_size = 64; 108EXPORT_SYMBOL_GPL(mmu_slb_size); 109#ifdef CONFIG_PPC_64K_PAGES 110int mmu_ci_restrictions; 111#endif 112#ifdef CONFIG_DEBUG_PAGEALLOC 113static u8 *linear_map_hash_slots; 114static unsigned long linear_map_hash_count; 115static DEFINE_SPINLOCK(linear_map_hash_lock); 116#endif /* CONFIG_DEBUG_PAGEALLOC */ 117 118/* There are definitions of page sizes arrays to be used when none 119 * is provided by the firmware. 120 */ 121 122/* Pre-POWER4 CPUs (4k pages only) 123 */ 124static struct mmu_psize_def mmu_psize_defaults_old[] = { 125 [MMU_PAGE_4K] = { 126 .shift = 12, 127 .sllp = 0, 128 .penc = 0, 129 .avpnm = 0, 130 .tlbiel = 0, 131 }, 132}; 133 134/* POWER4, GPUL, POWER5 135 * 136 * Support for 16Mb large pages 137 */ 138static struct mmu_psize_def mmu_psize_defaults_gp[] = { 139 [MMU_PAGE_4K] = { 140 .shift = 12, 141 .sllp = 0, 142 .penc = 0, 143 .avpnm = 0, 144 .tlbiel = 1, 145 }, 146 [MMU_PAGE_16M] = { 147 .shift = 24, 148 .sllp = SLB_VSID_L, 149 .penc = 0, 150 .avpnm = 0x1UL, 151 .tlbiel = 0, 152 }, 153}; 154 155static unsigned long htab_convert_pte_flags(unsigned long pteflags) 156{ 157 unsigned long rflags = pteflags & 0x1fa; 158 159 /* _PAGE_EXEC -> NOEXEC */ 160 if ((pteflags & _PAGE_EXEC) == 0) 161 rflags |= HPTE_R_N; 162 163 /* PP bits. PAGE_USER is already PP bit 0x2, so we only 164 * need to add in 0x1 if it's a read-only user page 165 */ 166 if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && 167 (pteflags & _PAGE_DIRTY))) 168 rflags |= 1; 169 170 /* Always add C */ 171 return rflags | HPTE_R_C; 172} 173 174int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 175 unsigned long pstart, unsigned long prot, 176 int psize, int ssize) 177{ 178 unsigned long vaddr, paddr; 179 unsigned int step, shift; 180 int ret = 0; 181 182 shift = mmu_psize_defs[psize].shift; 183 step = 1 << shift; 184 185 prot = htab_convert_pte_flags(prot); 186 187 DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n", 188 vstart, vend, pstart, prot, psize, ssize); 189 190 for (vaddr = vstart, paddr = pstart; vaddr < vend; 191 vaddr += step, paddr += step) { 192 unsigned long hash, hpteg; 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 194 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); 195 unsigned long tprot = prot; 196 197 /* Make kernel text executable */ 198 if (overlaps_kernel_text(vaddr, vaddr + step)) 199 tprot &= ~HPTE_R_N; 200 201 hash = hpt_hash(vpn, shift, ssize); 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 203 204 BUG_ON(!ppc_md.hpte_insert); 205 ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, 206 HPTE_V_BOLTED, psize, ssize); 207 208 if (ret < 0) 209 break; 210#ifdef CONFIG_DEBUG_PAGEALLOC 211 if ((paddr >> PAGE_SHIFT) < linear_map_hash_count) 212 linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; 213#endif /* CONFIG_DEBUG_PAGEALLOC */ 214 } 215 return ret < 0 ? ret : 0; 216} 217 218#ifdef CONFIG_MEMORY_HOTPLUG 219static int htab_remove_mapping(unsigned long vstart, unsigned long vend, 220 int psize, int ssize) 221{ 222 unsigned long vaddr; 223 unsigned int step, shift; 224 225 shift = mmu_psize_defs[psize].shift; 226 step = 1 << shift; 227 228 if (!ppc_md.hpte_removebolted) { 229 printk(KERN_WARNING "Platform doesn't implement " 230 "hpte_removebolted\n"); 231 return -EINVAL; 232 } 233 234 for (vaddr = vstart; vaddr < vend; vaddr += step) 235 ppc_md.hpte_removebolted(vaddr, psize, ssize); 236 237 return 0; 238} 239#endif /* CONFIG_MEMORY_HOTPLUG */ 240 241static int __init htab_dt_scan_seg_sizes(unsigned long node, 242 const char *uname, int depth, 243 void *data) 244{ 245 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 246 u32 *prop; 247 unsigned long size = 0; 248 249 /* We are scanning "cpu" nodes only */ 250 if (type == NULL || strcmp(type, "cpu") != 0) 251 return 0; 252 253 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", 254 &size); 255 if (prop == NULL) 256 return 0; 257 for (; size >= 4; size -= 4, ++prop) { 258 if (prop[0] == 40) { 259 DBG("1T segment support detected\n"); 260 cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; 261 return 1; 262 } 263 } 264 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; 265 return 0; 266} 267 268static void __init htab_init_seg_sizes(void) 269{ 270 of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); 271} 272 273static int __init htab_dt_scan_page_sizes(unsigned long node, 274 const char *uname, int depth, 275 void *data) 276{ 277 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 278 u32 *prop; 279 unsigned long size = 0; 280 281 /* We are scanning "cpu" nodes only */ 282 if (type == NULL || strcmp(type, "cpu") != 0) 283 return 0; 284 285 prop = (u32 *)of_get_flat_dt_prop(node, 286 "ibm,segment-page-sizes", &size); 287 if (prop != NULL) { 288 DBG("Page sizes from device-tree:\n"); 289 size /= 4; 290 cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); 291 while(size > 0) { 292 unsigned int shift = prop[0]; 293 unsigned int slbenc = prop[1]; 294 unsigned int lpnum = prop[2]; 295 unsigned int lpenc = 0; 296 struct mmu_psize_def *def; 297 int idx = -1; 298 299 size -= 3; prop += 3; 300 while(size > 0 && lpnum) { 301 if (prop[0] == shift) 302 lpenc = prop[1]; 303 prop += 2; size -= 2; 304 lpnum--; 305 } 306 switch(shift) { 307 case 0xc: 308 idx = MMU_PAGE_4K; 309 break; 310 case 0x10: 311 idx = MMU_PAGE_64K; 312 break; 313 case 0x14: 314 idx = MMU_PAGE_1M; 315 break; 316 case 0x18: 317 idx = MMU_PAGE_16M; 318 cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; 319 break; 320 case 0x22: 321 idx = MMU_PAGE_16G; 322 break; 323 } 324 if (idx < 0) 325 continue; 326 def = &mmu_psize_defs[idx]; 327 def->shift = shift; 328 if (shift <= 23) 329 def->avpnm = 0; 330 else 331 def->avpnm = (1 << (shift - 23)) - 1; 332 def->sllp = slbenc; 333 def->penc = lpenc; 334 /* We don't know for sure what's up with tlbiel, so 335 * for now we only set it for 4K and 64K pages 336 */ 337 if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K) 338 def->tlbiel = 1; 339 else 340 def->tlbiel = 0; 341 342 DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, " 343 "tlbiel=%d, penc=%d\n", 344 idx, shift, def->sllp, def->avpnm, def->tlbiel, 345 def->penc); 346 } 347 return 1; 348 } 349 return 0; 350} 351 352#ifdef CONFIG_HUGETLB_PAGE 353/* Scan for 16G memory blocks that have been set aside for huge pages 354 * and reserve those blocks for 16G huge pages. 355 */ 356static int __init htab_dt_scan_hugepage_blocks(unsigned long node, 357 const char *uname, int depth, 358 void *data) { 359 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 360 unsigned long *addr_prop; 361 u32 *page_count_prop; 362 unsigned int expected_pages; 363 long unsigned int phys_addr; 364 long unsigned int block_size; 365 366 /* We are scanning "memory" nodes only */ 367 if (type == NULL || strcmp(type, "memory") != 0) 368 return 0; 369 370 /* This property is the log base 2 of the number of virtual pages that 371 * will represent this memory block. */ 372 page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); 373 if (page_count_prop == NULL) 374 return 0; 375 expected_pages = (1 << page_count_prop[0]); 376 addr_prop = of_get_flat_dt_prop(node, "reg", NULL); 377 if (addr_prop == NULL) 378 return 0; 379 phys_addr = addr_prop[0]; 380 block_size = addr_prop[1]; 381 if (block_size != (16 * GB)) 382 return 0; 383 printk(KERN_INFO "Huge page(16GB) memory: " 384 "addr = 0x%lX size = 0x%lX pages = %d\n", 385 phys_addr, block_size, expected_pages); 386 if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) { 387 memblock_reserve(phys_addr, block_size * expected_pages); 388 add_gpage(phys_addr, block_size, expected_pages); 389 } 390 return 0; 391} 392#endif /* CONFIG_HUGETLB_PAGE */ 393 394static void __init htab_init_page_sizes(void) 395{ 396 int rc; 397 398 /* Default to 4K pages only */ 399 memcpy(mmu_psize_defs, mmu_psize_defaults_old, 400 sizeof(mmu_psize_defaults_old)); 401 402 /* 403 * Try to find the available page sizes in the device-tree 404 */ 405 rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL); 406 if (rc != 0) /* Found */ 407 goto found; 408 409 /* 410 * Not in the device-tree, let's fallback on known size 411 * list for 16M capable GP & GR 412 */ 413 if (mmu_has_feature(MMU_FTR_16M_PAGE)) 414 memcpy(mmu_psize_defs, mmu_psize_defaults_gp, 415 sizeof(mmu_psize_defaults_gp)); 416 found: 417#ifndef CONFIG_DEBUG_PAGEALLOC 418 /* 419 * Pick a size for the linear mapping. Currently, we only support 420 * 16M, 1M and 4K which is the default 421 */ 422 if (mmu_psize_defs[MMU_PAGE_16M].shift) 423 mmu_linear_psize = MMU_PAGE_16M; 424 else if (mmu_psize_defs[MMU_PAGE_1M].shift) 425 mmu_linear_psize = MMU_PAGE_1M; 426#endif /* CONFIG_DEBUG_PAGEALLOC */ 427 428#ifdef CONFIG_PPC_64K_PAGES 429 /* 430 * Pick a size for the ordinary pages. Default is 4K, we support 431 * 64K for user mappings and vmalloc if supported by the processor. 432 * We only use 64k for ioremap if the processor 433 * (and firmware) support cache-inhibited large pages. 434 * If not, we use 4k and set mmu_ci_restrictions so that 435 * hash_page knows to switch processes that use cache-inhibited 436 * mappings to 4k pages. 437 */ 438 if (mmu_psize_defs[MMU_PAGE_64K].shift) { 439 mmu_virtual_psize = MMU_PAGE_64K; 440 mmu_vmalloc_psize = MMU_PAGE_64K; 441 if (mmu_linear_psize == MMU_PAGE_4K) 442 mmu_linear_psize = MMU_PAGE_64K; 443 if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { 444 /* 445 * Don't use 64k pages for ioremap on pSeries, since 446 * that would stop us accessing the HEA ethernet. 447 */ 448 if (!machine_is(pseries)) 449 mmu_io_psize = MMU_PAGE_64K; 450 } else 451 mmu_ci_restrictions = 1; 452 } 453#endif /* CONFIG_PPC_64K_PAGES */ 454 455#ifdef CONFIG_SPARSEMEM_VMEMMAP 456 /* We try to use 16M pages for vmemmap if that is supported 457 * and we have at least 1G of RAM at boot 458 */ 459 if (mmu_psize_defs[MMU_PAGE_16M].shift && 460 memblock_phys_mem_size() >= 0x40000000) 461 mmu_vmemmap_psize = MMU_PAGE_16M; 462 else if (mmu_psize_defs[MMU_PAGE_64K].shift) 463 mmu_vmemmap_psize = MMU_PAGE_64K; 464 else 465 mmu_vmemmap_psize = MMU_PAGE_4K; 466#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 467 468 printk(KERN_DEBUG "Page orders: linear mapping = %d, " 469 "virtual = %d, io = %d" 470#ifdef CONFIG_SPARSEMEM_VMEMMAP 471 ", vmemmap = %d" 472#endif 473 "\n", 474 mmu_psize_defs[mmu_linear_psize].shift, 475 mmu_psize_defs[mmu_virtual_psize].shift, 476 mmu_psize_defs[mmu_io_psize].shift 477#ifdef CONFIG_SPARSEMEM_VMEMMAP 478 ,mmu_psize_defs[mmu_vmemmap_psize].shift 479#endif 480 ); 481 482#ifdef CONFIG_HUGETLB_PAGE 483 /* Reserve 16G huge page memory sections for huge pages */ 484 of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); 485#endif /* CONFIG_HUGETLB_PAGE */ 486} 487 488static int __init htab_dt_scan_pftsize(unsigned long node, 489 const char *uname, int depth, 490 void *data) 491{ 492 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 493 u32 *prop; 494 495 /* We are scanning "cpu" nodes only */ 496 if (type == NULL || strcmp(type, "cpu") != 0) 497 return 0; 498 499 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL); 500 if (prop != NULL) { 501 /* pft_size[0] is the NUMA CEC cookie */ 502 ppc64_pft_size = prop[1]; 503 return 1; 504 } 505 return 0; 506} 507 508static unsigned long __init htab_get_table_size(void) 509{ 510 unsigned long mem_size, rnd_mem_size, pteg_count, psize; 511 512 /* If hash size isn't already provided by the platform, we try to 513 * retrieve it from the device-tree. If it's not there neither, we 514 * calculate it now based on the total RAM size 515 */ 516 if (ppc64_pft_size == 0) 517 of_scan_flat_dt(htab_dt_scan_pftsize, NULL); 518 if (ppc64_pft_size) 519 return 1UL << ppc64_pft_size; 520 521 /* round mem_size up to next power of 2 */ 522 mem_size = memblock_phys_mem_size(); 523 rnd_mem_size = 1UL << __ilog2(mem_size); 524 if (rnd_mem_size < mem_size) 525 rnd_mem_size <<= 1; 526 527 /* # pages / 2 */ 528 psize = mmu_psize_defs[mmu_virtual_psize].shift; 529 pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11); 530 531 return pteg_count << 7; 532} 533 534#ifdef CONFIG_MEMORY_HOTPLUG 535int create_section_mapping(unsigned long start, unsigned long end) 536{ 537 return htab_bolt_mapping(start, end, __pa(start), 538 pgprot_val(PAGE_KERNEL), mmu_linear_psize, 539 mmu_kernel_ssize); 540} 541 542int remove_section_mapping(unsigned long start, unsigned long end) 543{ 544 return htab_remove_mapping(start, end, mmu_linear_psize, 545 mmu_kernel_ssize); 546} 547#endif /* CONFIG_MEMORY_HOTPLUG */ 548 549#define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) 550 551static void __init htab_finish_init(void) 552{ 553 extern unsigned int *htab_call_hpte_insert1; 554 extern unsigned int *htab_call_hpte_insert2; 555 extern unsigned int *htab_call_hpte_remove; 556 extern unsigned int *htab_call_hpte_updatepp; 557 558#ifdef CONFIG_PPC_HAS_HASH_64K 559 extern unsigned int *ht64_call_hpte_insert1; 560 extern unsigned int *ht64_call_hpte_insert2; 561 extern unsigned int *ht64_call_hpte_remove; 562 extern unsigned int *ht64_call_hpte_updatepp; 563 564 patch_branch(ht64_call_hpte_insert1, 565 FUNCTION_TEXT(ppc_md.hpte_insert), 566 BRANCH_SET_LINK); 567 patch_branch(ht64_call_hpte_insert2, 568 FUNCTION_TEXT(ppc_md.hpte_insert), 569 BRANCH_SET_LINK); 570 patch_branch(ht64_call_hpte_remove, 571 FUNCTION_TEXT(ppc_md.hpte_remove), 572 BRANCH_SET_LINK); 573 patch_branch(ht64_call_hpte_updatepp, 574 FUNCTION_TEXT(ppc_md.hpte_updatepp), 575 BRANCH_SET_LINK); 576 577#endif /* CONFIG_PPC_HAS_HASH_64K */ 578 579 patch_branch(htab_call_hpte_insert1, 580 FUNCTION_TEXT(ppc_md.hpte_insert), 581 BRANCH_SET_LINK); 582 patch_branch(htab_call_hpte_insert2, 583 FUNCTION_TEXT(ppc_md.hpte_insert), 584 BRANCH_SET_LINK); 585 patch_branch(htab_call_hpte_remove, 586 FUNCTION_TEXT(ppc_md.hpte_remove), 587 BRANCH_SET_LINK); 588 patch_branch(htab_call_hpte_updatepp, 589 FUNCTION_TEXT(ppc_md.hpte_updatepp), 590 BRANCH_SET_LINK); 591} 592 593static void __init htab_initialize(void) 594{ 595 unsigned long table; 596 unsigned long pteg_count; 597 unsigned long prot; 598 unsigned long base = 0, size = 0, limit; 599 struct memblock_region *reg; 600 601 DBG(" -> htab_initialize()\n"); 602 603 /* Initialize segment sizes */ 604 htab_init_seg_sizes(); 605 606 /* Initialize page sizes */ 607 htab_init_page_sizes(); 608 609 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { 610 mmu_kernel_ssize = MMU_SEGSIZE_1T; 611 mmu_highuser_ssize = MMU_SEGSIZE_1T; 612 printk(KERN_INFO "Using 1TB segments\n"); 613 } 614 615 /* 616 * Calculate the required size of the htab. We want the number of 617 * PTEGs to equal one half the number of real pages. 618 */ 619 htab_size_bytes = htab_get_table_size(); 620 pteg_count = htab_size_bytes >> 7; 621 622 htab_hash_mask = pteg_count - 1; 623 624 if (firmware_has_feature(FW_FEATURE_LPAR)) { 625 /* Using a hypervisor which owns the htab */ 626 htab_address = NULL; 627 _SDR1 = 0; 628#ifdef CONFIG_FA_DUMP 629 /* 630 * If firmware assisted dump is active firmware preserves 631 * the contents of htab along with entire partition memory. 632 * Clear the htab if firmware assisted dump is active so 633 * that we dont end up using old mappings. 634 */ 635 if (is_fadump_active() && ppc_md.hpte_clear_all) 636 ppc_md.hpte_clear_all(); 637#endif 638 } else { 639 /* Find storage for the HPT. Must be contiguous in 640 * the absolute address space. On cell we want it to be 641 * in the first 2 Gig so we can use it for IOMMU hacks. 642 */ 643 if (machine_is(cell)) 644 limit = 0x80000000; 645 else 646 limit = MEMBLOCK_ALLOC_ANYWHERE; 647 648 table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); 649 650 DBG("Hash table allocated at %lx, size: %lx\n", table, 651 htab_size_bytes); 652 653 htab_address = __va(table); 654 655 /* htab absolute addr + encoded htabsize */ 656 _SDR1 = table + __ilog2(pteg_count) - 11; 657 658 /* Initialize the HPT with no entries */ 659 memset((void *)table, 0, htab_size_bytes); 660 661 /* Set SDR1 */ 662 mtspr(SPRN_SDR1, _SDR1); 663 } 664 665 prot = pgprot_val(PAGE_KERNEL); 666 667#ifdef CONFIG_DEBUG_PAGEALLOC 668 linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; 669 linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, 670 1, ppc64_rma_size)); 671 memset(linear_map_hash_slots, 0, linear_map_hash_count); 672#endif /* CONFIG_DEBUG_PAGEALLOC */ 673 674 /* On U3 based machines, we need to reserve the DART area and 675 * _NOT_ map it to avoid cache paradoxes as it's remapped non 676 * cacheable later on 677 */ 678 679 /* create bolted the linear mapping in the hash table */ 680 for_each_memblock(memory, reg) { 681 base = (unsigned long)__va(reg->base); 682 size = reg->size; 683 684 DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", 685 base, size, prot); 686 687#ifdef CONFIG_U3_DART 688 /* Do not map the DART space. Fortunately, it will be aligned 689 * in such a way that it will not cross two memblock regions and 690 * will fit within a single 16Mb page. 691 * The DART space is assumed to be a full 16Mb region even if 692 * we only use 2Mb of that space. We will use more of it later 693 * for AGP GART. We have to use a full 16Mb large page. 694 */ 695 DBG("DART base: %lx\n", dart_tablebase); 696 697 if (dart_tablebase != 0 && dart_tablebase >= base 698 && dart_tablebase < (base + size)) { 699 unsigned long dart_table_end = dart_tablebase + 16 * MB; 700 if (base != dart_tablebase) 701 BUG_ON(htab_bolt_mapping(base, dart_tablebase, 702 __pa(base), prot, 703 mmu_linear_psize, 704 mmu_kernel_ssize)); 705 if ((base + size) > dart_table_end) 706 BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB, 707 base + size, 708 __pa(dart_table_end), 709 prot, 710 mmu_linear_psize, 711 mmu_kernel_ssize)); 712 continue; 713 } 714#endif /* CONFIG_U3_DART */ 715 BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), 716 prot, mmu_linear_psize, mmu_kernel_ssize)); 717 } 718 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); 719 720 /* 721 * If we have a memory_limit and we've allocated TCEs then we need to 722 * explicitly map the TCE area at the top of RAM. We also cope with the 723 * case that the TCEs start below memory_limit. 724 * tce_alloc_start/end are 16MB aligned so the mapping should work 725 * for either 4K or 16MB pages. 726 */ 727 if (tce_alloc_start) { 728 tce_alloc_start = (unsigned long)__va(tce_alloc_start); 729 tce_alloc_end = (unsigned long)__va(tce_alloc_end); 730 731 if (base + size >= tce_alloc_start) 732 tce_alloc_start = base + size + 1; 733 734 BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end, 735 __pa(tce_alloc_start), prot, 736 mmu_linear_psize, mmu_kernel_ssize)); 737 } 738 739 htab_finish_init(); 740 741 DBG(" <- htab_initialize()\n"); 742} 743#undef KB 744#undef MB 745 746void __init early_init_mmu(void) 747{ 748 /* Setup initial STAB address in the PACA */ 749 get_paca()->stab_real = __pa((u64)&initial_stab); 750 get_paca()->stab_addr = (u64)&initial_stab; 751 752 /* Initialize the MMU Hash table and create the linear mapping 753 * of memory. Has to be done before stab/slb initialization as 754 * this is currently where the page size encoding is obtained 755 */ 756 htab_initialize(); 757 758 /* Initialize stab / SLB management */ 759 if (mmu_has_feature(MMU_FTR_SLB)) 760 slb_initialize(); 761} 762 763#ifdef CONFIG_SMP 764void __cpuinit early_init_mmu_secondary(void) 765{ 766 /* Initialize hash table for that CPU */ 767 if (!firmware_has_feature(FW_FEATURE_LPAR)) 768 mtspr(SPRN_SDR1, _SDR1); 769 770 /* Initialize STAB/SLB. We use a virtual address as it works 771 * in real mode on pSeries. 772 */ 773 if (mmu_has_feature(MMU_FTR_SLB)) 774 slb_initialize(); 775 else 776 stab_initialize(get_paca()->stab_addr); 777} 778#endif /* CONFIG_SMP */ 779 780/* 781 * Called by asm hashtable.S for doing lazy icache flush 782 */ 783unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) 784{ 785 struct page *page; 786 787 if (!pfn_valid(pte_pfn(pte))) 788 return pp; 789 790 page = pte_page(pte); 791 792 /* page is dirty */ 793 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { 794 if (trap == 0x400) { 795 flush_dcache_icache_page(page); 796 set_bit(PG_arch_1, &page->flags); 797 } else 798 pp |= HPTE_R_N; 799 } 800 return pp; 801} 802 803#ifdef CONFIG_PPC_MM_SLICES 804unsigned int get_paca_psize(unsigned long addr) 805{ 806 u64 lpsizes; 807 unsigned char *hpsizes; 808 unsigned long index, mask_index; 809 810 if (addr < SLICE_LOW_TOP) { 811 lpsizes = get_paca()->context.low_slices_psize; 812 index = GET_LOW_SLICE_INDEX(addr); 813 return (lpsizes >> (index * 4)) & 0xF; 814 } 815 hpsizes = get_paca()->context.high_slices_psize; 816 index = GET_HIGH_SLICE_INDEX(addr); 817 mask_index = index & 0x1; 818 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; 819} 820 821#else 822unsigned int get_paca_psize(unsigned long addr) 823{ 824 return get_paca()->context.user_psize; 825} 826#endif 827 828/* 829 * Demote a segment to using 4k pages. 830 * For now this makes the whole process use 4k pages. 831 */ 832#ifdef CONFIG_PPC_64K_PAGES 833void demote_segment_4k(struct mm_struct *mm, unsigned long addr) 834{ 835 if (get_slice_psize(mm, addr) == MMU_PAGE_4K) 836 return; 837 slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); 838#ifdef CONFIG_SPU_BASE 839 spu_flush_all_slbs(mm); 840#endif 841 if (get_paca_psize(addr) != MMU_PAGE_4K) { 842 get_paca()->context = mm->context; 843 slb_flush_and_rebolt(); 844 } 845} 846#endif /* CONFIG_PPC_64K_PAGES */ 847 848#ifdef CONFIG_PPC_SUBPAGE_PROT 849/* 850 * This looks up a 2-bit protection code for a 4k subpage of a 64k page. 851 * Userspace sets the subpage permissions using the subpage_prot system call. 852 * 853 * Result is 0: full permissions, _PAGE_RW: read-only, 854 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. 855 */ 856static int subpage_protection(struct mm_struct *mm, unsigned long ea) 857{ 858 struct subpage_prot_table *spt = &mm->context.spt; 859 u32 spp = 0; 860 u32 **sbpm, *sbpp; 861 862 if (ea >= spt->maxaddr) 863 return 0; 864 if (ea < 0x100000000) { 865 /* addresses below 4GB use spt->low_prot */ 866 sbpm = spt->low_prot; 867 } else { 868 sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; 869 if (!sbpm) 870 return 0; 871 } 872 sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; 873 if (!sbpp) 874 return 0; 875 spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)]; 876 877 /* extract 2-bit bitfield for this 4k subpage */ 878 spp >>= 30 - 2 * ((ea >> 12) & 0xf); 879 880 /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */ 881 spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0); 882 return spp; 883} 884 885#else /* CONFIG_PPC_SUBPAGE_PROT */ 886static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) 887{ 888 return 0; 889} 890#endif 891 892void hash_failure_debug(unsigned long ea, unsigned long access, 893 unsigned long vsid, unsigned long trap, 894 int ssize, int psize, unsigned long pte) 895{ 896 if (!printk_ratelimit()) 897 return; 898 pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n", 899 ea, access, current->comm); 900 pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n", 901 trap, vsid, ssize, psize, pte); 902} 903 904/* Result code is: 905 * 0 - handled 906 * 1 - normal page fault 907 * -1 - critical hash insertion error 908 * -2 - access not permitted by subpage protection mechanism 909 */ 910int hash_page(unsigned long ea, unsigned long access, unsigned long trap) 911{ 912 pgd_t *pgdir; 913 unsigned long vsid; 914 struct mm_struct *mm; 915 pte_t *ptep; 916 unsigned hugeshift; 917 const struct cpumask *tmp; 918 int rc, user_region = 0, local = 0; 919 int psize, ssize; 920 921 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", 922 ea, access, trap); 923 924 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { 925 DBG_LOW(" out of pgtable range !\n"); 926 return 1; 927 } 928 929 /* Get region & vsid */ 930 switch (REGION_ID(ea)) { 931 case USER_REGION_ID: 932 user_region = 1; 933 mm = current->mm; 934 if (! mm) { 935 DBG_LOW(" user region with no mm !\n"); 936 return 1; 937 } 938 psize = get_slice_psize(mm, ea); 939 ssize = user_segment_size(ea); 940 vsid = get_vsid(mm->context.id, ea, ssize); 941 break; 942 case VMALLOC_REGION_ID: 943 mm = &init_mm; 944 vsid = get_kernel_vsid(ea, mmu_kernel_ssize); 945 if (ea < VMALLOC_END) 946 psize = mmu_vmalloc_psize; 947 else 948 psize = mmu_io_psize; 949 ssize = mmu_kernel_ssize; 950 break; 951 default: 952 /* Not a valid range 953 * Send the problem up to do_page_fault 954 */ 955 return 1; 956 } 957 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 958 959 /* Get pgdir */ 960 pgdir = mm->pgd; 961 if (pgdir == NULL) 962 return 1; 963 964 /* Check CPU locality */ 965 tmp = cpumask_of(smp_processor_id()); 966 if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) 967 local = 1; 968 969#ifndef CONFIG_PPC_64K_PAGES 970 /* If we use 4K pages and our psize is not 4K, then we might 971 * be hitting a special driver mapping, and need to align the 972 * address before we fetch the PTE. 973 * 974 * It could also be a hugepage mapping, in which case this is 975 * not necessary, but it's not harmful, either. 976 */ 977 if (psize != MMU_PAGE_4K) 978 ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 979#endif /* CONFIG_PPC_64K_PAGES */ 980 981 /* Get PTE and page size from page tables */ 982 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); 983 if (ptep == NULL || !pte_present(*ptep)) { 984 DBG_LOW(" no PTE !\n"); 985 return 1; 986 } 987 988 /* Add _PAGE_PRESENT to the required access perm */ 989 access |= _PAGE_PRESENT; 990 991 /* Pre-check access permissions (will be re-checked atomically 992 * in __hash_page_XX but this pre-check is a fast path 993 */ 994 if (access & ~pte_val(*ptep)) { 995 DBG_LOW(" no access !\n"); 996 return 1; 997 } 998 999#ifdef CONFIG_HUGETLB_PAGE 1000 if (hugeshift) 1001 return __hash_page_huge(ea, access, vsid, ptep, trap, local, 1002 ssize, hugeshift, psize); 1003#endif /* CONFIG_HUGETLB_PAGE */ 1004 1005#ifndef CONFIG_PPC_64K_PAGES 1006 DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); 1007#else 1008 DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), 1009 pte_val(*(ptep + PTRS_PER_PTE))); 1010#endif 1011 /* Do actual hashing */ 1012#ifdef CONFIG_PPC_64K_PAGES 1013 /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */ 1014 if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) { 1015 demote_segment_4k(mm, ea); 1016 psize = MMU_PAGE_4K; 1017 } 1018 1019 /* If this PTE is non-cacheable and we have restrictions on 1020 * using non cacheable large pages, then we switch to 4k 1021 */ 1022 if (mmu_ci_restrictions && psize == MMU_PAGE_64K && 1023 (pte_val(*ptep) & _PAGE_NO_CACHE)) { 1024 if (user_region) { 1025 demote_segment_4k(mm, ea); 1026 psize = MMU_PAGE_4K; 1027 } else if (ea < VMALLOC_END) { 1028 /* 1029 * some driver did a non-cacheable mapping 1030 * in vmalloc space, so switch vmalloc 1031 * to 4k pages 1032 */ 1033 printk(KERN_ALERT "Reducing vmalloc segment " 1034 "to 4kB pages because of " 1035 "non-cacheable mapping\n"); 1036 psize = mmu_vmalloc_psize = MMU_PAGE_4K; 1037#ifdef CONFIG_SPU_BASE 1038 spu_flush_all_slbs(mm); 1039#endif 1040 } 1041 } 1042 if (user_region) { 1043 if (psize != get_paca_psize(ea)) { 1044 get_paca()->context = mm->context; 1045 slb_flush_and_rebolt(); 1046 } 1047 } else if (get_paca()->vmalloc_sllp != 1048 mmu_psize_defs[mmu_vmalloc_psize].sllp) { 1049 get_paca()->vmalloc_sllp = 1050 mmu_psize_defs[mmu_vmalloc_psize].sllp; 1051 slb_vmalloc_update(); 1052 } 1053#endif /* CONFIG_PPC_64K_PAGES */ 1054 1055#ifdef CONFIG_PPC_HAS_HASH_64K 1056 if (psize == MMU_PAGE_64K) 1057 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1058 else 1059#endif /* CONFIG_PPC_HAS_HASH_64K */ 1060 { 1061 int spp = subpage_protection(mm, ea); 1062 if (access & spp) 1063 rc = -2; 1064 else 1065 rc = __hash_page_4K(ea, access, vsid, ptep, trap, 1066 local, ssize, spp); 1067 } 1068 1069 /* Dump some info in case of hash insertion failure, they should 1070 * never happen so it is really useful to know if/when they do 1071 */ 1072 if (rc == -1) 1073 hash_failure_debug(ea, access, vsid, trap, ssize, psize, 1074 pte_val(*ptep)); 1075#ifndef CONFIG_PPC_64K_PAGES 1076 DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); 1077#else 1078 DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep), 1079 pte_val(*(ptep + PTRS_PER_PTE))); 1080#endif 1081 DBG_LOW(" -> rc=%d\n", rc); 1082 return rc; 1083} 1084EXPORT_SYMBOL_GPL(hash_page); 1085 1086void hash_preload(struct mm_struct *mm, unsigned long ea, 1087 unsigned long access, unsigned long trap) 1088{ 1089 unsigned long vsid; 1090 pgd_t *pgdir; 1091 pte_t *ptep; 1092 unsigned long flags; 1093 int rc, ssize, local = 0; 1094 1095 BUG_ON(REGION_ID(ea) != USER_REGION_ID); 1096 1097#ifdef CONFIG_PPC_MM_SLICES 1098 /* We only prefault standard pages for now */ 1099 if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize)) 1100 return; 1101#endif 1102 1103 DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," 1104 " trap=%lx\n", mm, mm->pgd, ea, access, trap); 1105 1106 /* Get Linux PTE if available */ 1107 pgdir = mm->pgd; 1108 if (pgdir == NULL) 1109 return; 1110 ptep = find_linux_pte(pgdir, ea); 1111 if (!ptep) 1112 return; 1113 1114#ifdef CONFIG_PPC_64K_PAGES 1115 /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on 1116 * a 64K kernel), then we don't preload, hash_page() will take 1117 * care of it once we actually try to access the page. 1118 * That way we don't have to duplicate all of the logic for segment 1119 * page size demotion here 1120 */ 1121 if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE)) 1122 return; 1123#endif /* CONFIG_PPC_64K_PAGES */ 1124 1125 /* Get VSID */ 1126 ssize = user_segment_size(ea); 1127 vsid = get_vsid(mm->context.id, ea, ssize); 1128 1129 /* Hash doesn't like irqs */ 1130 local_irq_save(flags); 1131 1132 /* Is that local to this CPU ? */ 1133 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 1134 local = 1; 1135 1136 /* Hash it in */ 1137#ifdef CONFIG_PPC_HAS_HASH_64K 1138 if (mm->context.user_psize == MMU_PAGE_64K) 1139 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1140 else 1141#endif /* CONFIG_PPC_HAS_HASH_64K */ 1142 rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, 1143 subpage_protection(mm, ea)); 1144 1145 /* Dump some info in case of hash insertion failure, they should 1146 * never happen so it is really useful to know if/when they do 1147 */ 1148 if (rc == -1) 1149 hash_failure_debug(ea, access, vsid, trap, ssize, 1150 mm->context.user_psize, pte_val(*ptep)); 1151 1152 local_irq_restore(flags); 1153} 1154 1155/* WARNING: This is called from hash_low_64.S, if you change this prototype, 1156 * do not forget to update the assembly call site ! 1157 */ 1158void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, 1159 int local) 1160{ 1161 unsigned long hash, index, shift, hidx, slot; 1162 1163 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); 1164 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { 1165 hash = hpt_hash(vpn, shift, ssize); 1166 hidx = __rpte_to_hidx(pte, index); 1167 if (hidx & _PTEIDX_SECONDARY) 1168 hash = ~hash; 1169 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1170 slot += hidx & _PTEIDX_GROUP_IX; 1171 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1172 ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local); 1173 } pte_iterate_hashed_end(); 1174} 1175 1176void flush_hash_range(unsigned long number, int local) 1177{ 1178 if (ppc_md.flush_hash_range) 1179 ppc_md.flush_hash_range(number, local); 1180 else { 1181 int i; 1182 struct ppc64_tlb_batch *batch = 1183 &__get_cpu_var(ppc64_tlb_batch); 1184 1185 for (i = 0; i < number; i++) 1186 flush_hash_page(batch->vpn[i], batch->pte[i], 1187 batch->psize, batch->ssize, local); 1188 } 1189} 1190 1191/* 1192 * low_hash_fault is called when we the low level hash code failed 1193 * to instert a PTE due to an hypervisor error 1194 */ 1195void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) 1196{ 1197 if (user_mode(regs)) { 1198#ifdef CONFIG_PPC_SUBPAGE_PROT 1199 if (rc == -2) 1200 _exception(SIGSEGV, regs, SEGV_ACCERR, address); 1201 else 1202#endif 1203 _exception(SIGBUS, regs, BUS_ADRERR, address); 1204 } else 1205 bad_page_fault(regs, address, SIGBUS); 1206} 1207 1208#ifdef CONFIG_DEBUG_PAGEALLOC 1209static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) 1210{ 1211 unsigned long hash, hpteg; 1212 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1213 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); 1214 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1215 int ret; 1216 1217 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1218 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1219 1220 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), 1221 mode, HPTE_V_BOLTED, 1222 mmu_linear_psize, mmu_kernel_ssize); 1223 BUG_ON (ret < 0); 1224 spin_lock(&linear_map_hash_lock); 1225 BUG_ON(linear_map_hash_slots[lmi] & 0x80); 1226 linear_map_hash_slots[lmi] = ret | 0x80; 1227 spin_unlock(&linear_map_hash_lock); 1228} 1229 1230static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) 1231{ 1232 unsigned long hash, hidx, slot; 1233 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1234 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); 1235 1236 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1237 spin_lock(&linear_map_hash_lock); 1238 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1239 hidx = linear_map_hash_slots[lmi] & 0x7f; 1240 linear_map_hash_slots[lmi] = 0; 1241 spin_unlock(&linear_map_hash_lock); 1242 if (hidx & _PTEIDX_SECONDARY) 1243 hash = ~hash; 1244 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1245 slot += hidx & _PTEIDX_GROUP_IX; 1246 ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0); 1247} 1248 1249void kernel_map_pages(struct page *page, int numpages, int enable) 1250{ 1251 unsigned long flags, vaddr, lmi; 1252 int i; 1253 1254 local_irq_save(flags); 1255 for (i = 0; i < numpages; i++, page++) { 1256 vaddr = (unsigned long)page_address(page); 1257 lmi = __pa(vaddr) >> PAGE_SHIFT; 1258 if (lmi >= linear_map_hash_count) 1259 continue; 1260 if (enable) 1261 kernel_map_linear_page(vaddr, lmi); 1262 else 1263 kernel_unmap_linear_page(vaddr, lmi); 1264 } 1265 local_irq_restore(flags); 1266} 1267#endif /* CONFIG_DEBUG_PAGEALLOC */ 1268 1269void setup_initial_memory_limit(phys_addr_t first_memblock_base, 1270 phys_addr_t first_memblock_size) 1271{ 1272 /* We don't currently support the first MEMBLOCK not mapping 0 1273 * physical on those processors 1274 */ 1275 BUG_ON(first_memblock_base != 0); 1276 1277 /* On LPAR systems, the first entry is our RMA region, 1278 * non-LPAR 64-bit hash MMU systems don't have a limitation 1279 * on real mode access, but using the first entry works well 1280 * enough. We also clamp it to 1G to avoid some funky things 1281 * such as RTAS bugs etc... 1282 */ 1283 ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); 1284 1285 /* Finally limit subsequent allocations */ 1286 memblock_set_current_limit(ppc64_rma_size); 1287}