Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock: Introduce default allocation limit and use it to replace explicit ones

This introduces memblock.current_limit which is used to limit allocations
from memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE).

The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still
be used with memblock_alloc_base() to allocate really anywhere.

It is -no-longer- cropped to MEMBLOCK_REAL_LIMIT which disappears.

Note to archs: I'm leaving the default limit to MEMBLOCK_ALLOC_ANYWHERE. I
strongly recommend that you ensure that you set an appropriate limit
during boot in order to guarantee that a memblock_alloc() at any time
results in something that is accessible with a simple __va().

The reason is that a subsequent patch will introduce the ability for
the array to resize itself by reallocating itself. The MEMBLOCK core will
honor the current limit when performing those allocations.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+63 -53
-3
arch/microblaze/include/asm/memblock.h
··· 9 9 #ifndef _ASM_MICROBLAZE_MEMBLOCK_H 10 10 #define _ASM_MICROBLAZE_MEMBLOCK_H 11 11 12 - /* MEMBLOCK limit is OFF */ 13 - #define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF 14 - 15 12 #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ 16 13 17 14
-7
arch/powerpc/include/asm/memblock.h
··· 5 5 6 6 #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt) 7 7 8 - #ifdef CONFIG_PPC32 9 - extern phys_addr_t lowmem_end_addr; 10 - #define MEMBLOCK_REAL_LIMIT lowmem_end_addr 11 - #else 12 - #define MEMBLOCK_REAL_LIMIT 0 13 - #endif 14 - 15 8 #endif /* _ASM_POWERPC_MEMBLOCK_H */
+19 -1
arch/powerpc/kernel/prom.c
··· 98 98 99 99 if ((memory_limit && (start + size) > memory_limit) || 100 100 overlaps_crashkernel(start, size)) { 101 - p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); 101 + p = __va(memblock_alloc(size, PAGE_SIZE)); 102 102 memcpy(p, initial_boot_params, size); 103 103 initial_boot_params = (struct boot_param_header *)p; 104 104 DBG("Moved device tree to 0x%p\n", p); ··· 655 655 static inline void __init phyp_dump_reserve_mem(void) {} 656 656 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ 657 657 658 + static void set_boot_memory_limit(void) 659 + { 660 + #ifdef CONFIG_PPC32 661 + /* 601 can only access 16MB at the moment */ 662 + if (PVR_VER(mfspr(SPRN_PVR)) == 1) 663 + memblock_set_current_limit(0x01000000); 664 + /* 8xx can only access 8MB at the moment */ 665 + else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) 666 + memblock_set_current_limit(0x00800000); 667 + else 668 + memblock_set_current_limit(0x10000000); 669 + #else 670 + memblock_set_current_limit(memblock.rmo_size); 671 + #endif 672 + } 658 673 659 674 void __init early_init_devtree(void *params) 660 675 { ··· 698 683 699 684 /* Scan memory nodes and rebuild MEMBLOCKs */ 700 685 memblock_init(); 686 + 701 687 of_scan_flat_dt(early_init_dt_scan_root, NULL); 702 688 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); 703 689 ··· 733 717 memblock_dump_all(); 734 718 735 719 DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); 720 + 721 + set_boot_memory_limit(); 736 722 737 723 /* We may need to relocate the flat tree, do it now. 738 724 * FIXME .. and the initrd too? */
+1 -1
arch/powerpc/kernel/setup_32.c
··· 246 246 unsigned int i; 247 247 248 248 /* interrupt stacks must be in lowmem, we get that for free on ppc32 249 - * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ 249 + * as the memblock is limited to lowmem by default */ 250 250 for_each_possible_cpu(i) { 251 251 softirq_ctx[i] = (struct thread_info *) 252 252 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+3 -2
arch/powerpc/mm/40x_mmu.c
··· 35 35 #include <linux/init.h> 36 36 #include <linux/delay.h> 37 37 #include <linux/highmem.h> 38 + #include <linux/memblock.h> 38 39 39 40 #include <asm/pgalloc.h> 40 41 #include <asm/prom.h> ··· 48 47 #include <asm/bootx.h> 49 48 #include <asm/machdep.h> 50 49 #include <asm/setup.h> 50 + 51 51 #include "mmu_decl.h" 52 52 53 53 extern int __map_without_ltlbs; ··· 141 139 * coverage with normal-sized pages (or other reasons) do not 142 140 * attempt to allocate outside the allowed range. 143 141 */ 144 - 145 - __initial_memory_limit_addr = memstart_addr + mapped; 142 + memblock_set_current_limit(memstart_addr + mapped); 146 143 147 144 return mapped; 148 145 }
+2 -1
arch/powerpc/mm/fsl_booke_mmu.c
··· 40 40 #include <linux/init.h> 41 41 #include <linux/delay.h> 42 42 #include <linux/highmem.h> 43 + #include <linux/memblock.h> 43 44 44 45 #include <asm/pgalloc.h> 45 46 #include <asm/prom.h> ··· 213 212 pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, 214 213 (unsigned int)((total_lowmem - __max_low_memory) >> 20)); 215 214 216 - __initial_memory_limit_addr = memstart_addr + __max_low_memory; 215 + memblock_set_current_limit(memstart_addr + __max_low_memory); 217 216 }
+2 -1
arch/powerpc/mm/hash_utils_64.c
··· 696 696 #endif /* CONFIG_U3_DART */ 697 697 BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), 698 698 prot, mmu_linear_psize, mmu_kernel_ssize)); 699 - } 699 + } 700 + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); 700 701 701 702 /* 702 703 * If we have a memory_limit and we've allocated TCEs then we need to
+7 -22
arch/powerpc/mm/init_32.c
··· 92 92 unsigned long __max_low_memory = MAX_LOW_MEM; 93 93 94 94 /* 95 - * address of the limit of what is accessible with initial MMU setup - 96 - * 256MB usually, but only 16MB on 601. 97 - */ 98 - phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000; 99 - 100 - /* 101 95 * Check for command-line options that affect what MMU_init will do. 102 96 */ 103 97 void MMU_setup(void) ··· 119 125 { 120 126 if (ppc_md.progress) 121 127 ppc_md.progress("MMU:enter", 0x111); 122 - 123 - /* 601 can only access 16MB at the moment */ 124 - if (PVR_VER(mfspr(SPRN_PVR)) == 1) 125 - __initial_memory_limit_addr = 0x01000000; 126 - /* 8xx can only access 8MB at the moment */ 127 - if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) 128 - __initial_memory_limit_addr = 0x00800000; 129 128 130 129 /* parse args from command line */ 131 130 MMU_setup(); ··· 177 190 #ifdef CONFIG_BOOTX_TEXT 178 191 btext_unmap(); 179 192 #endif 193 + 194 + /* Shortly after that, the entire linear mapping will be available */ 195 + memblock_set_current_limit(lowmem_end_addr); 180 196 } 181 197 182 198 /* This is only called until mem_init is done. */ 183 199 void __init *early_get_page(void) 184 200 { 185 - void *p; 186 - 187 - if (init_bootmem_done) { 188 - p = alloc_bootmem_pages(PAGE_SIZE); 189 - } else { 190 - p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 191 - __initial_memory_limit_addr)); 192 - } 193 - return p; 201 + if (init_bootmem_done) 202 + return alloc_bootmem_pages(PAGE_SIZE); 203 + else 204 + return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); 194 205 } 195 206 196 207 /* Free up now-unused memory */
+1 -2
arch/powerpc/mm/ppc_mmu_32.c
··· 223 223 * Find some memory for the hash table. 224 224 */ 225 225 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); 226 - Hash = __va(memblock_alloc_base(Hash_size, Hash_size, 227 - __initial_memory_limit_addr)); 226 + Hash = __va(memblock_alloc(Hash_size, Hash_size)); 228 227 cacheable_memzero(Hash, Hash_size); 229 228 _SDR1 = __pa(Hash) | SDR1_LOW_BITS; 230 229
+2
arch/powerpc/mm/tlb_nohash.c
··· 432 432 * the MMU configuration 433 433 */ 434 434 mb(); 435 + 436 + memblock_set_current_limit(linear_map_top); 435 437 } 436 438 437 439 void __init early_init_mmu(void)
-2
arch/sh/include/asm/memblock.h
··· 1 1 #ifndef __ASM_SH_MEMBLOCK_H 2 2 #define __ASM_SH_MEMBLOCK_H 3 3 4 - #define MEMBLOCK_REAL_LIMIT 0 5 - 6 4 #endif /* __ASM_SH_MEMBLOCK_H */
-2
arch/sparc/include/asm/memblock.h
··· 5 5 6 6 #define MEMBLOCK_DBG(fmt...) prom_printf(fmt) 7 7 8 - #define MEMBLOCK_REAL_LIMIT 0 9 - 10 8 #endif /* !(_SPARC64_MEMBLOCK_H) */
+15 -1
include/linux/memblock.h
··· 34 34 struct memblock { 35 35 unsigned long debug; 36 36 u64 rmo_size; 37 + u64 current_limit; 37 38 struct memblock_type memory; 38 39 struct memblock_type reserved; 39 40 }; ··· 47 46 extern long memblock_remove(u64 base, u64 size); 48 47 extern long __init memblock_free(u64 base, u64 size); 49 48 extern long __init memblock_reserve(u64 base, u64 size); 49 + 50 50 extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid); 51 51 extern u64 __init memblock_alloc(u64 size, u64 align); 52 + 53 + /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ 54 + #define MEMBLOCK_ALLOC_ANYWHERE (~(u64)0) 55 + #define MEMBLOCK_ALLOC_ACCESSIBLE 0 56 + 52 57 extern u64 __init memblock_alloc_base(u64 size, 53 58 u64, u64 max_addr); 54 - #define MEMBLOCK_ALLOC_ANYWHERE 0 55 59 extern u64 __init __memblock_alloc_base(u64 size, 56 60 u64 align, u64 max_addr); 57 61 extern u64 __init memblock_phys_mem_size(void); ··· 71 65 72 66 /* Provided by the architecture */ 73 67 extern u64 memblock_nid_range(u64 start, u64 end, int *nid); 68 + 69 + /** 70 + * memblock_set_current_limit - Set the current allocation limit to allow 71 + * limiting allocations to what is currently 72 + * accessible during boot 73 + * @limit: New limit value (physical address) 74 + */ 75 + extern void memblock_set_current_limit(u64 limit); 74 76 75 77 76 78 /*
+11 -8
mm/memblock.c
··· 115 115 memblock.reserved.regions[0].base = 0; 116 116 memblock.reserved.regions[0].size = 0; 117 117 memblock.reserved.cnt = 1; 118 + 119 + memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; 118 120 } 119 121 120 122 void __init memblock_analyze(void) ··· 375 373 376 374 u64 __init memblock_alloc(u64 size, u64 align) 377 375 { 378 - return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); 376 + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 379 377 } 380 378 381 379 u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) ··· 401 399 402 400 size = memblock_align_up(size, align); 403 401 404 - /* On some platforms, make sure we allocate lowmem */ 405 - /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */ 406 - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) 407 - max_addr = MEMBLOCK_REAL_LIMIT; 408 - 409 402 /* Pump up max_addr */ 410 - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) 411 - max_addr = ~(u64)0; 403 + if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE) 404 + max_addr = memblock.current_limit; 412 405 413 406 /* We do a top-down search, this tends to limit memory 414 407 * fragmentation by keeping early boot allocs near the ··· 522 525 int memblock_is_region_reserved(u64 base, u64 size) 523 526 { 524 527 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 528 + } 529 + 530 + 531 + void __init memblock_set_current_limit(u64 limit) 532 + { 533 + memblock.current_limit = limit; 525 534 } 526 535