Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (at v3.3-rc2)
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

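/*
 * Reader's sketch of the mmu_psize_def fields used throughout this file
 * (illustrative summary, not an authoritative definition): .shift is log2
 * of the page size, .sllp is the SLB L/LP segment encoding, .penc is the
 * LP encoding placed in the HPTE, .avpnm masks AVPN bits overlapped by a
 * large page's offset, and .tlbiel flags whether tlbiel can invalidate
 * that size locally. The default tables below are only used when the
 * device-tree scan in htab_dt_scan_page_sizes() has nothing better.
 */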
/* These are definitions of page size arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = pteflags & 0x1fa;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
					 (pteflags & _PAGE_DIRTY)))
		rflags |= 1;

	/* Always add C */
	return rflags | HPTE_R_C;
}
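/*
 * Worked example of the conversion above; the values follow the PP-bit
 * comment in htab_convert_pte_flags() and are meant as an illustration
 * only. A clean or read-only user mapping has _PAGE_USER but not both
 * _PAGE_RW and _PAGE_DIRTY, so PP ends up 0x2 | 0x1 = 0x3 (user
 * read-only). A dirty, writable user page keeps PP = 0x2 (user
 * read/write); a kernel-only page has neither, i.e. PP = 0x0. Pages
 * without _PAGE_EXEC also get HPTE_R_N, and HPTE_R_C is always pre-set
 * so the hardware never has to update the HPTE's change bit.
 */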

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
					 HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			       int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
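/*
 * Sketch of the property layout the loop above parses (the concrete
 * numbers are illustrative only, not taken from a particular machine):
 * "ibm,segment-page-sizes" is a list of cells of the form
 * { shift, slbenc, lpnum, { shift, lpenc } * lpnum }. An entry such as
 * <0x18 0x100 0x1 0x18 0x0> would describe 16M pages (shift 0x18) with
 * an SLB encoding of 0x100 and a single HPTE encoding pair saying that
 * a 16M page in a 16M segment uses penc 0, matching the GP defaults.
 */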

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data) {
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	unsigned long *addr_prop;
	u32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << page_count_prop[0]);
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = addr_prop[0];
	block_size = addr_prop[1];
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fall back on the known size
	 * list for 16M capable GP & GR
	 */
	if (mmu_has_feature(MMU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count, psize;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = memblock_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	psize = mmu_psize_defs[mmu_virtual_psize].shift;
	pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);

	return pteg_count << 7;
}
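/*
 * Sizing example (illustrative arithmetic only): with 8GB of RAM and 4K
 * base pages, rnd_mem_size is 2^33, so pteg_count = 2^33 >> 13 = 2^20
 * PTEGs, i.e. one PTEG (8 HPTEs of 16 bytes, 128 bytes) for every two
 * 4K pages. The returned size is pteg_count << 7 = 2^27 bytes, a 128MB
 * hash table. The 1UL << 11 floor keeps the table at least 256KB even
 * on very small memory configurations.
 */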

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	return htab_bolt_mapping(start, end, __pa(start),
				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				 mmu_kernel_ssize);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
				   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#define FUNCTION_TEXT(A)	((*(unsigned long *)(A)))

static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	patch_branch(ht64_call_hpte_insert1,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_insert2,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_remove,
		FUNCTION_TEXT(ppc_md.hpte_remove),
		BRANCH_SET_LINK);
	patch_branch(ht64_call_hpte_updatepp,
		FUNCTION_TEXT(ppc_md.hpte_updatepp),
		BRANCH_SET_LINK);

#endif /* CONFIG_PPC_HAS_HASH_64K */

	patch_branch(htab_call_hpte_insert1,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_insert2,
		FUNCTION_TEXT(ppc_md.hpte_insert),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_remove,
		FUNCTION_TEXT(ppc_md.hpte_remove),
		BRANCH_SET_LINK);
	patch_branch(htab_call_hpte_updatepp,
		FUNCTION_TEXT(ppc_md.hpte_updatepp),
		BRANCH_SET_LINK);
}

static void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0, limit;
	struct memblock_region *reg;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.  On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = MEMBLOCK_ALLOC_ANYWHERE;

		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
						    1, ppc64_rma_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for_each_memblock(memory, reg) {
		base = (unsigned long)__va(reg->base);
		size = reg->size;

		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two memblock regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init early_init_mmu(void)
{
	/* Setup initial STAB address in the PACA */
	get_paca()->stab_real = __pa((u64)&initial_stab);
	get_paca()->stab_addr = (u64)&initial_stab;

	/* Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/* Initialize stab / SLB management except on iSeries
	 */
	if (mmu_has_feature(MMU_FTR_SLB))
		slb_initialize();
	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(get_paca()->stab_real);
}

#ifdef CONFIG_SMP
void __cpuinit early_init_mmu_secondary(void)
{
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);

	/* Initialize STAB/SLB. We use a virtual address as it works
	 * in real mode on pSeries and we want a virtual address on
	 * iSeries anyway
	 */
	if (mmu_has_feature(MMU_FTR_SLB))
		slb_initialize();
	else
		stab_initialize(get_paca()->stab_addr);
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
	unsigned long index, slices;

	if (addr < SLICE_LOW_TOP) {
		slices = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		slices = get_paca()->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	return 0;
}
#endif

void hash_failure_debug(unsigned long ea, unsigned long access,
			unsigned long vsid, unsigned long trap,
			int ssize, int psize, unsigned long pte)
{
	if (!printk_ratelimit())
		return;
	pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
		ea, access, current->comm);
	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
		trap, vsid, ssize, psize, pte);
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	pgd_t *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	unsigned hugeshift;
	const struct cpumask *tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of(smp_processor_id());
	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
		local = 1;

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we might
	 * be hitting a special driver mapping, and need to align the
	 * address before we fetch the PTE.
	 *
	 * It could also be a hugepage mapping, in which case this is
	 * not necessary, but it's not harmful, either.
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

	/* Add _PAGE_PRESENT to the required access perm */
	access |= _PAGE_PRESENT;

	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

#ifdef CONFIG_HUGETLB_PAGE
	if (hugeshift)
		return __hash_page_huge(ea, access, vsid, ptep, trap, local,
					ssize, hugeshift, psize);
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(mm, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize, psize,
				   pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	unsigned long flags;
	int rc, ssize, local = 0;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
				    subpage_protection(mm, ea));

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm->context.user_psize, pte_val(*ptep));

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON (ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* On LPAR systems, the first entry is our RMA region,
	 * non-LPAR 64-bit hash MMU systems don't have a limitation
	 * on real mode access, but using the first entry works well
	 * enough. We also clamp it to 1G to avoid some funky things
	 * such as RTAS bugs etc...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(ppc64_rma_size);
}