Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch, mm: set high_memory in free_area_init()

high_memory defines the upper bound on directly mapped memory. This bound
is defined by the beginning of ZONE_HIGHMEM when a system has high memory
and by the end of memory otherwise.

All this is known to generic memory management initialization code that
can set high_memory while initializing core mm structures.

Add a generic calculation of high_memory to free_area_init() and remove
per-architecture calculation except for the architectures that set and use
high_memory earlier than that.

Link: https://lkml.kernel.org/r/20250313135003.836600-11-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com> [x86]
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Guo Ren (csky) <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Mike Rapoport (Microsoft) and committed by
Andrew Morton
e120d1bc 8268af30

+30 -62
-1
arch/alpha/mm/init.c
··· 276 276 void __init 277 277 mem_init(void) 278 278 { 279 - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 280 279 memblock_free_all(); 281 280 } 282 281
-2
arch/arc/mm/init.c
··· 150 150 */ 151 151 max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn; 152 152 153 - high_memory = (void *)(min_high_pfn << PAGE_SHIFT); 154 - 155 153 arch_pfn_offset = min(min_low_pfn, min_high_pfn); 156 154 kmap_init(); 157 155 #endif /* CONFIG_HIGHMEM */
-2
arch/arm64/mm/init.c
··· 309 309 } 310 310 311 311 early_init_fdt_scan_reserved_mem(); 312 - 313 - high_memory = __va(memblock_end_of_DRAM() - 1) + 1; 314 312 } 315 313 316 314 void __init bootmem_init(void)
-1
arch/csky/mm/init.c
··· 47 47 #ifdef CONFIG_HIGHMEM 48 48 unsigned long tmp; 49 49 #endif 50 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 51 50 52 51 memblock_free_all(); 53 52
-6
arch/hexagon/mm/init.c
··· 100 100 * initial kernel segment table's physical address. 101 101 */ 102 102 init_mm.context.ptbase = __pa(init_mm.pgd); 103 - 104 - /* 105 - * Start of high memory area. Will probably need something more 106 - * fancy if we... get more fancy. 107 - */ 108 - high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT); 109 103 } 110 104 111 105 #ifndef DMA_RESERVE
-1
arch/loongarch/kernel/numa.c
··· 389 389 390 390 void __init mem_init(void) 391 391 { 392 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 393 392 memblock_free_all(); 394 393 } 395 394
-2
arch/loongarch/mm/init.c
··· 78 78 79 79 void __init mem_init(void) 80 80 { 81 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 82 - 83 81 memblock_free_all(); 84 82 } 85 83 #endif /* !CONFIG_NUMA */
-2
arch/microblaze/mm/init.c
··· 120 120 121 121 void __init mem_init(void) 122 122 { 123 - high_memory = (void *)__va(memory_start + lowmem_size - 1); 124 - 125 123 /* this will put all memory onto the freelists */ 126 124 memblock_free_all(); 127 125 #ifdef CONFIG_HIGHMEM
-2
arch/mips/mm/init.c
··· 417 417 max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; 418 418 } 419 419 #endif 420 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 421 420 422 421 free_area_init(max_zone_pfns); 423 422 } ··· 468 469 #else /* CONFIG_NUMA */ 469 470 void __init mem_init(void) 470 471 { 471 - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); 472 472 setup_zero_pages(); /* This comes from node 0 */ 473 473 memblock_free_all(); 474 474 }
-6
arch/nios2/mm/init.c
··· 62 62 63 63 void __init mem_init(void) 64 64 { 65 - unsigned long end_mem = memory_end; /* this must not include 66 - kernel stack at top */ 67 - 68 - end_mem &= PAGE_MASK; 69 - high_memory = __va(end_mem); 70 - 71 65 /* this will put all memory onto the freelists */ 72 66 memblock_free_all(); 73 67 }
-2
arch/openrisc/mm/init.c
··· 193 193 { 194 194 BUG_ON(!mem_map); 195 195 196 - high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); 197 - 198 196 /* clear the zero-page */ 199 197 memset((void *)empty_zero_page, 0, PAGE_SIZE); 200 198
-1
arch/parisc/mm/init.c
··· 562 562 BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); 563 563 #endif 564 564 565 - high_memory = __va((max_pfn << PAGE_SHIFT)); 566 565 memblock_free_all(); 567 566 568 567 #ifdef CONFIG_PA11
-1
arch/riscv/mm/init.c
··· 295 295 phys_ram_end = memblock_end_of_DRAM(); 296 296 min_low_pfn = PFN_UP(phys_ram_base); 297 297 max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); 298 - high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); 299 298 300 299 dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); 301 300
-2
arch/s390/mm/init.c
··· 159 159 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); 160 160 cpumask_set_cpu(0, mm_cpumask(&init_mm)); 161 161 162 - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 163 - 164 162 pv_init(); 165 163 kfence_split_mapping(); 166 164
-7
arch/sh/mm/init.c
··· 330 330 331 331 void __init mem_init(void) 332 332 { 333 - pg_data_t *pgdat; 334 - 335 - high_memory = NULL; 336 - for_each_online_pgdat(pgdat) 337 - high_memory = max_t(void *, high_memory, 338 - __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); 339 - 340 333 memblock_free_all(); 341 334 342 335 /* Set this up early, so we can take care of the zero page */
-1
arch/sparc/mm/init_32.c
··· 275 275 276 276 taint_real_pages(); 277 277 278 - high_memory = __va(max_low_pfn << PAGE_SHIFT); 279 278 memblock_free_all(); 280 279 281 280 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-2
arch/sparc/mm/init_64.c
··· 2505 2505 } 2506 2506 void __init mem_init(void) 2507 2507 { 2508 - high_memory = __va(last_valid_pfn << PAGE_SHIFT); 2509 - 2510 2508 memblock_free_all(); 2511 2509 2512 2510 /*
-1
arch/um/kernel/um_arch.c
··· 385 385 386 386 high_physmem = uml_physmem + physmem_size; 387 387 end_iomem = high_physmem + iomem_size; 388 - high_memory = (void *) end_iomem; 389 388 390 389 start_vm = VMALLOC_START; 391 390
-2
arch/x86/kernel/setup.c
··· 972 972 max_low_pfn = e820__end_of_low_ram_pfn(); 973 973 else 974 974 max_low_pfn = max_pfn; 975 - 976 - high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; 977 975 #endif 978 976 979 977 /* Find and reserve MPTABLE area */
-3
arch/x86/mm/init_32.c
··· 643 643 highstart_pfn = max_low_pfn; 644 644 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", 645 645 pages_to_mb(highend_pfn - highstart_pfn)); 646 - high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; 647 - #else 648 - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; 649 646 #endif 650 647 651 648 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
-3
arch/x86/mm/numa_32.c
··· 41 41 highstart_pfn = max_low_pfn; 42 42 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", 43 43 pages_to_mb(highend_pfn - highstart_pfn)); 44 - high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; 45 - #else 46 - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; 47 44 #endif 48 45 printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 49 46 pages_to_mb(max_low_pfn));
-2
arch/xtensa/mm/init.c
··· 164 164 { 165 165 free_highpages(); 166 166 167 - high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); 168 - 169 167 memblock_free_all(); 170 168 } 171 169
-8
mm/memory.c
··· 114 114 } 115 115 116 116 /* 117 - * A number of key systems in x86 including ioremap() rely on the assumption 118 - * that high_memory defines the upper bound on direct map memory, then end 119 - * of ZONE_NORMAL. 120 - */ 121 - void *high_memory; 122 - EXPORT_SYMBOL(high_memory); 123 - 124 - /* 125 117 * Randomize the address space (stacks, mmaps, brk, etc.). 126 118 * 127 119 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
+30
mm/mm_init.c
··· 45 45 EXPORT_SYMBOL(mem_map); 46 46 #endif 47 47 48 + /* 49 + * high_memory defines the upper bound on direct map memory, then end 50 + * of ZONE_NORMAL. 51 + */ 52 + void *high_memory; 53 + EXPORT_SYMBOL(high_memory); 54 + 48 55 #ifdef CONFIG_DEBUG_MEMORY_INIT 49 56 int __meminitdata mminit_loglevel; 50 57 ··· 1785 1778 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1786 1779 } 1787 1780 1781 + static void set_high_memory(void) 1782 + { 1783 + phys_addr_t highmem = memblock_end_of_DRAM(); 1784 + 1785 + /* 1786 + * Some architectures (e.g. ARM) set high_memory very early and 1787 + * use it in arch setup code. 1788 + * If an architecture already set high_memory don't overwrite it 1789 + */ 1790 + if (high_memory) 1791 + return; 1792 + 1793 + #ifdef CONFIG_HIGHMEM 1794 + if (arch_has_descending_max_zone_pfns() || 1795 + highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])) 1796 + highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]); 1797 + #endif 1798 + 1799 + high_memory = phys_to_virt(highmem - 1) + 1; 1800 + } 1801 + 1788 1802 /** 1789 1803 * free_area_init - Initialise all pg_data_t and zone data 1790 1804 * @max_zone_pfn: an array of max PFNs for each zone ··· 1928 1900 1929 1901 /* disable hash distribution for systems with a single node */ 1930 1902 fixup_hashdist(); 1903 + 1904 + set_high_memory(); 1931 1905 } 1932 1906 1933 1907 /**
-2
mm/nommu.c
··· 42 42 #include <asm/mmu_context.h> 43 43 #include "internal.h" 44 44 45 - void *high_memory; 46 - EXPORT_SYMBOL(high_memory); 47 45 unsigned long highest_memmap_pfn; 48 46 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; 49 47 int heap_stack_gap = 0;