// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
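/* Clear the requested bits in the cached copy of the CP15 control register. */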
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment &= ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

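/*
 * Compute the PFN limits from the memblock layout: min is the first
 * page of DRAM, max_low the top of lowmem, max_high the top of DRAM.
 */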
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

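/*
 * Derive the DMA zone limit from the machine descriptor; without a
 * platform-specified dma_zone_size, the whole 32-bit address space
 * is treated as DMA-able.
 */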
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

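/*
 * Hand the zone boundaries (DMA, normal lowmem and, if configured,
 * highmem) to the core mm via free_area_init().
 */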
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
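/*
 * A PFN is valid if it has a memory map entry: either it lies within a
 * present memblock region, or within pageblock_size of one.
 */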
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry
	 * for it, because we round the freed memory map to pageblock
	 * boundaries.
	 */
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

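/*
 * Permanently remove memory from the kernel's view. Only valid while
 * arm_memblock_steal_permitted is set, i.e. before arm_memblock_init()
 * has finished.
 */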
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_phys_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
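/*
 * Read this CPU's cache type register and track the smallest I-cache
 * line size seen so far; warn when a secondary CPU disagrees with the
 * boot CPU.
 */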
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	reserve_initrd_mem();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

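/*
 * Establish the PFN limits, run the early memory test over lowmem and
 * bring up the sparse memory model and zone layout.
 */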
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

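/*
 * Release every free memblock range that lies above the lowmem limit
 * to the page allocator as highmem pages.
 */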
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > arm_dma_pfn_limit)
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	/*
	 * Check boundaries twice: some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
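/*
 * A range of kernel image sections together with the PMD bits that
 * must be set or cleared to change its permissions.
 */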
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name = "pre-text NX",
		.start = PAGE_OFFSET,
		.end = (unsigned long)_stext,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name = "init NX",
		.start = (unsigned long)__init_begin,
		.end = (unsigned long)_sdata,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name = "rodata NX",
		.start = (unsigned long)__start_rodata_section_aligned,
		.end = (unsigned long)__init_begin,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name = "text/rodata RO",
		.start = (unsigned long)_stext,
		.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is
 * only safe to call this with preemption disabled, as under
 * stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

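/*
 * Apply (or clear) the permission bits described by @perms across the
 * given mm, one section at a time; misaligned entries are reported
 * and skipped.
 */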
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

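/*
 * Mark the pre-text, init and rodata regions non-executable, applied
 * to every mm under stop_machine().
 */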
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

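/*
 * Make kernel text and rodata read-only, then report any mapping that
 * is still both writable and executable (debug_checkwx()).
 */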
void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

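/*
 * Called late in boot: lock down section permissions, poison the init
 * sections and hand their pages back to the page allocator.
 */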
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
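/*
 * Poison and free the initrd pages; the range is widened to page
 * boundaries only where it matches the global initrd window.
 */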
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif