arch/arm/mm/init.c at v5.11 (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif
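/*
 * Worked example (hypothetical platform values, not taken from this file):
 * with PHYS_OFFSET = 0x80000000 and mdesc->dma_zone_size = SZ_256M,
 * setup_dma_zone() below computes arm_dma_limit = 0x8fffffff and, with
 * 4 KiB pages (PAGE_SHIFT = 12), arm_dma_pfn_limit = 0x8ffff.
 */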
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif
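/*
 * Worked example for check_cpu_icache_size() above: CTR bits [3:0]
 * (IminLine) hold log2 of the number of words in the smallest I-cache
 * line, so a field value of 3 decodes as 1 << (3 + 2) = 32-byte lines.
 * When a CPU reports a smaller line size than previously recorded, the
 * global is lowered so cache maintenance uses the conservative stride.
 */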
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};
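/*
 * Illustrative sketch (derived from section_update() below): each entry
 * rewrites its section mappings as
 *
 *	pmd = (pmd & entry->mask) | (set ? entry->prot : entry->clear);
 *
 * so e.g. the non-LPAE "text/rodata RO" entry restores write permission
 * on clear via .clear = PMD_SECT_AP_WRITE, while the NX entries leave
 * .clear at zero because they are never undone.
 */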
/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}
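/*
 * Note: permissions are applied at section granularity, hence the
 * IS_ALIGNED() checks in set_section_perms() above. SECTION_SIZE is
 * typically 1 MiB with the classic short-descriptor page tables and
 * 2 MiB with LPAE (stated as background; this file does not define it).
 */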
/**
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and is executed by only one CPU while all
 * other CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
			  current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
			  current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
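/*
 * Worked example for free_initrd_mem() above (hypothetical addresses):
 * an initrd with initrd_start = 0xc0a00123 and initrd_end = 0xc0c00123
 * is widened to the page-aligned range [0xc0a00000, 0xc0c01000) before
 * being poisoned and freed, mirroring the rounding in arm_initrd_init().
 */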