// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

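/* Walk the kernel page tables (pgd -> pud -> pmd) to the PTE mapping vaddr */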
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
						       vaddr), vaddr), vaddr);
}
#endif

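/*
 * Give the platform a chance to override the protection used for
 * /dev/mem-style physical mappings; otherwise map anything that is
 * not RAM as non-cached.
 */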
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

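/*
 * Hot-add path: grow the hash page table if needed, create the linear
 * mapping for the new range, flush it from the d-cache, and hand the
 * pages to the core mm.
 */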
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
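/* Hot-remove path: undo everything arch_add_memory() set up for the range */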
void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NEED_MULTIPLE_NODES
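/*
 * Flat (single node) variant: derive the PFN limits straight from
 * memblock and place all of memory in node 0.
 */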
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* Mark the holes between memblock regions (pages that don't exist) as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code. 32-bit-only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

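/*
 * Release all memblock-managed pages to the buddy allocator and, on 32-bit,
 * report the kernel's virtual memory layout.
 */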
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu-up
	 * functions. Do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

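/*
 * Free the memory occupied by the .init sections. Switch ppc_md.progress
 * to the plain printk implementation first, since the early progress hook
 * may itself live in init memory that is about to be released.
 */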
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

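/*
 * Flush the page out of the d-cache and invalidate it from the i-cache so
 * that freshly written instructions become visible to instruction fetch.
 */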
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* Neither 8xx nor 64-bit supports highmem, so there is no need to kmap */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

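/*
 * Flush the i-cache for a user range, mapping the page temporarily
 * since it may be a highmem page.
 */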
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
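	/* 0x300 is a data storage interrupt, 0x400 an instruction storage interrupt */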
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);