Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: Remove rmo_size, bury it in arch/powerpc where it belongs

The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact
server ppc64 though I hijack it on embedded ppc64 for similar purposes)
and represents the area of memory that can be accessed in real mode
(aka with MMU off), or on embedded, from the exception vectors (which
are bolted in the TLB) which pretty much boils down to the same thing.

We take that out of the generic MEMBLOCK data structure and move it into
arch/powerpc where it belongs, renaming it to "RMA" while at it.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+125 -40
+12
arch/powerpc/include/asm/mmu.h
··· 2 2 #define _ASM_POWERPC_MMU_H_ 3 3 #ifdef __KERNEL__ 4 4 5 + #include <linux/types.h> 6 + 5 7 #include <asm/asm-compat.h> 6 8 #include <asm/feature-fixups.h> 7 9 ··· 83 81 /* MMU initialization (64-bit only fo now) */ 84 82 extern void early_init_mmu(void); 85 83 extern void early_init_mmu_secondary(void); 84 + 85 + extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, 86 + phys_addr_t first_memblock_size); 87 + 88 + #ifdef CONFIG_PPC64 89 + /* This is our real memory area size on ppc64 server, on embedded, we 90 + * make it match the size our of bolted TLB area 91 + */ 92 + extern u64 ppc64_rma_size; 93 + #endif /* CONFIG_PPC64 */ 86 94 87 95 #endif /* !__ASSEMBLY__ */ 88 96
+1 -5
arch/powerpc/kernel/head_40x.S
··· 923 923 mtspr SPRN_PID,r0 924 924 sync 925 925 926 - /* Configure and load two entries into TLB slots 62 and 63. 927 - * In case we are pinning TLBs, these are reserved in by the 928 - * other TLB functions. If not reserving, then it doesn't 929 - * matter where they are loaded. 930 - */ 926 + /* Configure and load one entry into TLB slots 63 */ 931 927 clrrwi r4,r4,10 /* Mask off the real page number */ 932 928 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ 933 929
+1 -1
arch/powerpc/kernel/paca.c
··· 117 117 * the first segment. On iSeries they must be within the area mapped 118 118 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. 119 119 */ 120 - limit = min(0x10000000ULL, memblock.rmo_size); 120 + limit = min(0x10000000ULL, ppc64_rma_size); 121 121 if (firmware_has_feature(FW_FEATURE_ISERIES)) 122 122 limit = min(limit, HvPagesToMap * HVPAGESIZE); 123 123
+8 -21
arch/powerpc/kernel/prom.c
··· 66 66 int __initdata iommu_is_off; 67 67 int __initdata iommu_force_on; 68 68 unsigned long tce_alloc_start, tce_alloc_end; 69 + u64 ppc64_rma_size; 69 70 #endif 70 71 71 72 static int __init early_parse_mem(char *p) ··· 493 492 494 493 void __init early_init_dt_add_memory_arch(u64 base, u64 size) 495 494 { 496 - #if defined(CONFIG_PPC64) 495 + #ifdef CONFIG_PPC64 497 496 if (iommu_is_off) { 498 497 if (base >= 0x80000000ul) 499 498 return; ··· 502 501 } 503 502 #endif 504 503 505 - memblock_add(base, size); 506 - 504 + /* First MEMBLOCK added, do some special initializations */ 505 + if (memstart_addr == ~(phys_addr_t)0) 506 + setup_initial_memory_limit(base, size); 507 507 memstart_addr = min((u64)memstart_addr, base); 508 + 509 + /* Add the chunk to the MEMBLOCK list */ 510 + memblock_add(base, size); 508 511 } 509 512 510 513 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) ··· 660 655 static inline void __init phyp_dump_reserve_mem(void) {} 661 656 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ 662 657 663 - static void set_boot_memory_limit(void) 664 - { 665 - #ifdef CONFIG_PPC32 666 - /* 601 can only access 16MB at the moment */ 667 - if (PVR_VER(mfspr(SPRN_PVR)) == 1) 668 - memblock_set_current_limit(0x01000000); 669 - /* 8xx can only access 8MB at the moment */ 670 - else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) 671 - memblock_set_current_limit(0x00800000); 672 - else 673 - memblock_set_current_limit(0x10000000); 674 - #else 675 - memblock_set_current_limit(memblock.rmo_size); 676 - #endif 677 - } 678 - 679 658 void __init early_init_devtree(void *params) 680 659 { 681 660 phys_addr_t limit; ··· 722 733 memblock_dump_all(); 723 734 724 735 DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); 725 - 726 - set_boot_memory_limit(); 727 736 728 737 /* We may need to relocate the flat tree, do it now. 729 738 * FIXME .. and the initrd too? */
+1 -1
arch/powerpc/kernel/rtas.c
··· 934 934 */ 935 935 #ifdef CONFIG_PPC64 936 936 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { 937 - rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); 937 + rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); 938 938 ibm_suspend_me_token = rtas_token("ibm,suspend-me"); 939 939 } 940 940 #endif
+1 -1
arch/powerpc/kernel/setup_64.c
··· 487 487 * bringup, we need to get at them in real mode. This means they 488 488 * must also be within the RMO region. 489 489 */ 490 - limit = min(slb0_limit(), memblock.rmo_size); 490 + limit = min(slb0_limit(), ppc64_rma_size); 491 491 492 492 for_each_possible_cpu(i) { 493 493 unsigned long sp;
+13 -1
arch/powerpc/mm/40x_mmu.c
··· 141 141 * coverage with normal-sized pages (or other reasons) do not 142 142 * attempt to allocate outside the allowed range. 143 143 */ 144 - memblock_set_current_limit(memstart_addr + mapped); 144 + memblock_set_current_limit(mapped); 145 145 146 146 return mapped; 147 + } 148 + 149 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 150 + phys_addr_t first_memblock_size) 151 + { 152 + /* We don't currently support the first MEMBLOCK not mapping 0 153 + * physical on those processors 154 + */ 155 + BUG_ON(first_memblock_base != 0); 156 + 157 + /* 40x can only access 16MB at the moment (see head_40x.S) */ 158 + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); 147 159 }
+14
arch/powerpc/mm/44x_mmu.c
··· 24 24 */ 25 25 26 26 #include <linux/init.h> 27 + #include <linux/memblock.h> 28 + 27 29 #include <asm/mmu.h> 28 30 #include <asm/system.h> 29 31 #include <asm/page.h> ··· 213 211 #endif /* DEBUG */ 214 212 } 215 213 return total_lowmem; 214 + } 215 + 216 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 217 + phys_addr_t first_memblock_size) 218 + { 219 + /* We don't currently support the first MEMBLOCK not mapping 0 220 + * physical on those processors 221 + */ 222 + BUG_ON(first_memblock_base != 0); 223 + 224 + /* 44x has a 256M TLB entry pinned at boot */ 225 + memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); 216 226 } 217 227 218 228 #ifdef CONFIG_SMP
+9
arch/powerpc/mm/fsl_booke_mmu.c
··· 215 215 216 216 memblock_set_current_limit(memstart_addr + __max_low_memory); 217 217 } 218 + 219 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 220 + phys_addr_t first_memblock_size) 221 + { 222 + phys_addr_t limit = first_memblock_base + first_memblock_size; 223 + 224 + /* 64M mapped initially according to head_fsl_booke.S */ 225 + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); 226 + }
+21 -1
arch/powerpc/mm/hash_utils_64.c
··· 649 649 #ifdef CONFIG_DEBUG_PAGEALLOC 650 650 linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; 651 651 linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, 652 - 1, memblock.rmo_size)); 652 + 1, ppc64_rma_size)); 653 653 memset(linear_map_hash_slots, 0, linear_map_hash_count); 654 654 #endif /* CONFIG_DEBUG_PAGEALLOC */ 655 655 ··· 1248 1248 local_irq_restore(flags); 1249 1249 } 1250 1250 #endif /* CONFIG_DEBUG_PAGEALLOC */ 1251 + 1252 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 1253 + phys_addr_t first_memblock_size) 1254 + { 1255 + /* We don't currently support the first MEMBLOCK not mapping 0 1256 + * physical on those processors 1257 + */ 1258 + BUG_ON(first_memblock_base != 0); 1259 + 1260 + /* On LPAR systems, the first entry is our RMA region, 1261 + * non-LPAR 64-bit hash MMU systems don't have a limitation 1262 + * on real mode access, but using the first entry works well 1263 + * enough. We also clamp it to 1G to avoid some funky things 1264 + * such as RTAS bugs etc... 1265 + */ 1266 + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); 1267 + 1268 + /* Finally limit subsequent allocations */ 1269 + memblock_set_current_limit(ppc64_rma_size); 1270 + }
+14
arch/powerpc/mm/init_32.c
··· 237 237 } 238 238 #endif 239 239 240 + 241 + #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ 242 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 243 + phys_addr_t first_memblock_size) 244 + { 245 + /* We don't currently support the first MEMBLOCK not mapping 0 246 + * physical on those processors 247 + */ 248 + BUG_ON(first_memblock_base != 0); 249 + 250 + /* 8xx can only access 8MB at the moment */ 251 + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); 252 + } 253 + #endif /* CONFIG_8xx */
+1
arch/powerpc/mm/init_64.c
··· 328 328 return 0; 329 329 } 330 330 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 331 +
+15
arch/powerpc/mm/ppc_mmu_32.c
··· 271 271 272 272 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); 273 273 } 274 + 275 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 276 + phys_addr_t first_memblock_size) 277 + { 278 + /* We don't currently support the first MEMBLOCK not mapping 0 279 + * physical on those processors 280 + */ 281 + BUG_ON(first_memblock_base != 0); 282 + 283 + /* 601 can only access 16MB at the moment */ 284 + if (PVR_VER(mfspr(SPRN_PVR)) == 1) 285 + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); 286 + else /* Anything else has 256M mapped */ 287 + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); 288 + }
+14
arch/powerpc/mm/tlb_nohash.c
··· 446 446 __early_init_mmu(0); 447 447 } 448 448 449 + void setup_initial_memory_limit(phys_addr_t first_memblock_base, 450 + phys_addr_t first_memblock_size) 451 + { 452 + /* On Embedded 64-bit, we adjust the RMA size to match 453 + * the bolted TLB entry. We know for now that only 1G 454 + * entries are supported though that may eventually 455 + * change. We crop it to the size of the first MEMBLOCK to 456 + * avoid going over total available memory just in case... 457 + */ 458 + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); 459 + 460 + /* Finally limit subsequent allocations */ 461 + memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size); 462 + } 449 463 #endif /* CONFIG_PPC64 */
-1
include/linux/memblock.h
··· 33 33 34 34 struct memblock { 35 35 unsigned long debug; 36 - u64 rmo_size; 37 36 u64 current_limit; 38 37 struct memblock_type memory; 39 38 struct memblock_type reserved;
-8
mm/memblock.c
··· 49 49 return; 50 50 51 51 pr_info("MEMBLOCK configuration:\n"); 52 - pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); 53 52 pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); 54 53 55 54 memblock_dump(&memblock.memory, "memory"); ··· 194 195 195 196 long memblock_add(u64 base, u64 size) 196 197 { 197 - /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ 198 - if (base == 0) 199 - memblock.rmo_size = size; 200 - 201 198 return memblock_add_region(&memblock.memory, base, size); 202 199 203 200 } ··· 453 458 memblock.memory.cnt = i + 1; 454 459 break; 455 460 } 456 - 457 - if (memblock.memory.regions[0].size < memblock.rmo_size) 458 - memblock.rmo_size = memblock.memory.regions[0].size; 459 461 460 462 memory_limit = memblock_end_of_DRAM(); 461 463