Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch, mm: set max_mapnr when allocating memory map for FLATMEM

max_mapnr is essentially the size of the memory map for systems that use
FLATMEM. There is no reason to calculate it in each and every architecture
when it is calculated anyway in alloc_node_mem_map().

Drop setting of max_mapnr from architecture code and set it once in
alloc_node_mem_map().

While at it, move the definitions of mem_map and max_mapnr to mm/mm_init.c
so there won't be two copies for the MMU and !MMU variants.

Link: https://lkml.kernel.org/r/20250313135003.836600-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com> [x86]
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Guo Ren (csky) <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Mike Rapoport (Microsoft) and committed by
Andrew Morton
8268af30 d319c8b4

+21 -86
-1
arch/alpha/mm/init.c
··· 276 276 void __init 277 277 mem_init(void) 278 278 { 279 - set_max_mapnr(max_low_pfn); 280 279 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 281 280 memblock_free_all(); 282 281 }
-5
arch/arc/mm/init.c
··· 154 154 155 155 arch_pfn_offset = min(min_low_pfn, min_high_pfn); 156 156 kmap_init(); 157 - 158 - #else /* CONFIG_HIGHMEM */ 159 - /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */ 160 - max_mapnr = max_low_pfn - min_low_pfn; 161 - 162 157 #endif /* CONFIG_HIGHMEM */ 163 158 164 159 free_area_init(max_zone_pfn);
-2
arch/arm/mm/init.c
··· 275 275 swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE); 276 276 #endif 277 277 278 - set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 279 - 280 278 #ifdef CONFIG_SA1111 281 279 /* now that our DMA memory is actually so designated, we can free it */ 282 280 memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
-4
arch/csky/mm/init.c
··· 46 46 { 47 47 #ifdef CONFIG_HIGHMEM 48 48 unsigned long tmp; 49 - 50 - set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET); 51 - #else 52 - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); 53 49 #endif 54 50 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 55 51
-1
arch/loongarch/mm/init.c
··· 78 78 79 79 void __init mem_init(void) 80 80 { 81 - max_mapnr = max_low_pfn; 82 81 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 83 82 84 83 memblock_free_all();
-4
arch/microblaze/mm/init.c
··· 104 104 * 105 105 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) 106 106 * max_low_pfn 107 - * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) 108 107 */ 109 108 110 109 /* memory start is from the kernel end (aligned) to higher addr */ 111 110 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ 112 - /* RAM is assumed contiguous */ 113 - max_mapnr = memory_size >> PAGE_SHIFT; 114 111 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; 115 112 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; 116 113 117 - pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr); 118 114 pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); 119 115 pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); 120 116 pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
-8
arch/mips/mm/init.c
··· 415 415 " %ldk highmem ignored\n", 416 416 (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); 417 417 max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; 418 - 419 - max_mapnr = max_low_pfn; 420 - } else if (highend_pfn) { 421 - max_mapnr = highend_pfn; 422 - } else { 423 - max_mapnr = max_low_pfn; 424 418 } 425 - #else 426 - max_mapnr = max_low_pfn; 427 419 #endif 428 420 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 429 421
-1
arch/nios2/kernel/setup.c
··· 158 158 *cmdline_p = boot_command_line; 159 159 160 160 find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); 161 - max_mapnr = max_low_pfn; 162 161 163 162 memblock_reserve(__pa_symbol(_stext), _end - _stext); 164 163 #ifdef CONFIG_BLK_DEV_INITRD
+1 -1
arch/nios2/mm/init.c
··· 51 51 pagetable_init(); 52 52 pgd_current = swapper_pg_dir; 53 53 54 - max_zone_pfn[ZONE_NORMAL] = max_mapnr; 54 + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; 55 55 56 56 /* pass the memory from the bootmem allocator to the main allocator */ 57 57 free_area_init(max_zone_pfn);
-1
arch/openrisc/mm/init.c
··· 193 193 { 194 194 BUG_ON(!mem_map); 195 195 196 - max_mapnr = max_low_pfn; 197 196 high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); 198 197 199 198 /* clear the zero-page */
-1
arch/parisc/mm/init.c
··· 563 563 #endif 564 564 565 565 high_memory = __va((max_pfn << PAGE_SHIFT)); 566 - set_max_mapnr(max_low_pfn); 567 566 memblock_free_all(); 568 567 569 568 #ifdef CONFIG_PA11
-2
arch/powerpc/kernel/setup-common.c
··· 957 957 958 958 /* Parse memory topology */ 959 959 mem_topology_setup(); 960 - /* Set max_mapnr before paging_init() */ 961 - set_max_mapnr(max_pfn); 962 960 high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); 963 961 964 962 /*
-1
arch/riscv/mm/init.c
··· 298 298 high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); 299 299 300 300 dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); 301 - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); 302 301 303 302 reserve_initrd_mem(); 304 303
-1
arch/s390/mm/init.c
··· 159 159 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); 160 160 cpumask_set_cpu(0, mm_cpumask(&init_mm)); 161 161 162 - set_max_mapnr(max_low_pfn); 163 162 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 164 163 165 164 pv_init();
-1
arch/sh/mm/init.c
··· 290 290 */ 291 291 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 292 292 min_low_pfn = __MEMORY_START >> PAGE_SHIFT; 293 - set_max_mapnr(max_low_pfn - min_low_pfn); 294 293 295 294 nodes_clear(node_online_map); 296 295
-1
arch/sparc/mm/init_32.c
··· 275 275 276 276 taint_real_pages(); 277 277 278 - max_mapnr = last_valid_pfn - pfn_base; 279 278 high_memory = __va(max_low_pfn << PAGE_SHIFT); 280 279 memblock_free_all(); 281 280
-1
arch/um/include/shared/mem_user.h
··· 47 47 #define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1)) 48 48 49 49 extern unsigned long find_iomem(char *driver, unsigned long *len_out); 50 - extern void mem_total_pages(unsigned long physmem, unsigned long iomem); 51 50 extern void setup_physmem(unsigned long start, unsigned long usable, 52 51 unsigned long len); 53 52 extern void map_memory(unsigned long virt, unsigned long phys,
-12
arch/um/kernel/physmem.c
··· 22 22 unsigned long high_physmem; 23 23 EXPORT_SYMBOL(high_physmem); 24 24 25 - void __init mem_total_pages(unsigned long physmem, unsigned long iomem) 26 - { 27 - unsigned long phys_pages, iomem_pages, total_pages; 28 - 29 - phys_pages = physmem >> PAGE_SHIFT; 30 - iomem_pages = iomem >> PAGE_SHIFT; 31 - 32 - total_pages = phys_pages + iomem_pages; 33 - 34 - max_mapnr = total_pages; 35 - } 36 - 37 25 void map_memory(unsigned long virt, unsigned long phys, unsigned long len, 38 26 int r, int w, int x) 39 27 {
-1
arch/um/kernel/um_arch.c
··· 419 419 420 420 stack_protections((unsigned long) init_task.stack); 421 421 setup_physmem(uml_physmem, uml_reserved, physmem_size); 422 - mem_total_pages(physmem_size, iomem_size); 423 422 uml_dtb_init(); 424 423 read_initrd(); 425 424
-3
arch/x86/mm/init_32.c
··· 650 650 651 651 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 652 652 653 - #ifdef CONFIG_FLATMEM 654 - max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; 655 - #endif 656 653 __vmalloc_start_set = true; 657 654 658 655 printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-1
arch/xtensa/mm/init.c
··· 164 164 { 165 165 free_highpages(); 166 166 167 - max_mapnr = max_pfn - ARCH_PFN_OFFSET; 168 167 high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); 169 168 170 169 memblock_free_all();
+3 -2
include/asm-generic/memory_model.h
··· 19 19 #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ 20 20 ARCH_PFN_OFFSET) 21 21 22 + /* avoid <linux/mm.h> include hell */ 23 + extern unsigned long max_mapnr; 24 + 22 25 #ifndef pfn_valid 23 26 static inline int pfn_valid(unsigned long pfn) 24 27 { 25 - /* avoid <linux/mm.h> include hell */ 26 - extern unsigned long max_mapnr; 27 28 unsigned long pfn_offset = ARCH_PFN_OFFSET; 28 29 29 30 return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
-11
include/linux/mm.h
··· 46 46 void mm_core_init(void); 47 47 void init_mm_internals(void); 48 48 49 - #ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ 50 - extern unsigned long max_mapnr; 51 - 52 - static inline void set_max_mapnr(unsigned long limit) 53 - { 54 - max_mapnr = limit; 55 - } 56 - #else 57 - static inline void set_max_mapnr(unsigned long limit) { } 58 - #endif 59 - 60 49 extern atomic_long_t _totalram_pages; 61 50 static inline unsigned long totalram_pages(void) 62 51 {
-8
mm/memory.c
··· 95 95 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. 96 96 #endif 97 97 98 - #ifndef CONFIG_NUMA 99 - unsigned long max_mapnr; 100 - EXPORT_SYMBOL(max_mapnr); 101 - 102 - struct page *mem_map; 103 - EXPORT_SYMBOL(mem_map); 104 - #endif 105 - 106 98 static vm_fault_t do_fault(struct vm_fault *vmf); 107 99 static vm_fault_t do_anonymous_page(struct vm_fault *vmf); 108 100 static bool vmf_pte_changed(struct vm_fault *vmf);
+17 -8
mm/mm_init.c
··· 37 37 38 38 #include <asm/setup.h> 39 39 40 + #ifndef CONFIG_NUMA 41 + unsigned long max_mapnr; 42 + EXPORT_SYMBOL(max_mapnr); 43 + 44 + struct page *mem_map; 45 + EXPORT_SYMBOL(mem_map); 46 + #endif 47 + 40 48 #ifdef CONFIG_DEBUG_MEMORY_INIT 41 49 int __meminitdata mminit_loglevel; 42 50 ··· 1647 1639 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1648 1640 offset = pgdat->node_start_pfn - start; 1649 1641 /* 1650 - * The zone's endpoints aren't required to be MAX_PAGE_ORDER 1642 + * The zone's endpoints aren't required to be MAX_PAGE_ORDER 1651 1643 * aligned but the node_mem_map endpoints must be in order 1652 1644 * for the buddy allocator to function correctly. 1653 1645 */ ··· 1663 1655 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1664 1656 __func__, pgdat->node_id, (unsigned long)pgdat, 1665 1657 (unsigned long)pgdat->node_mem_map); 1666 - #ifndef CONFIG_NUMA 1658 + 1667 1659 /* the global mem_map is just set as node 0's */ 1668 - if (pgdat == NODE_DATA(0)) { 1669 - mem_map = NODE_DATA(0)->node_mem_map; 1670 - if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1671 - mem_map -= offset; 1672 - } 1673 - #endif 1660 + WARN_ON(pgdat != NODE_DATA(0)); 1661 + 1662 + mem_map = pgdat->node_mem_map; 1663 + if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1664 + mem_map -= offset; 1665 + 1666 + max_mapnr = end - start; 1674 1667 } 1675 1668 #else 1676 1669 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
-4
mm/nommu.c
··· 44 44 45 45 void *high_memory; 46 46 EXPORT_SYMBOL(high_memory); 47 - struct page *mem_map; 48 - unsigned long max_mapnr; 49 - EXPORT_SYMBOL(max_mapnr); 50 47 unsigned long highest_memmap_pfn; 51 48 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; 52 49 int heap_stack_gap = 0; 53 50 54 51 atomic_long_t mmap_pages_allocated; 55 52 56 - EXPORT_SYMBOL(mem_map); 57 53 58 54 /* list of mapped, potentially shareable regions */ 59 55 static struct kmem_cache *vm_region_jar;