/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

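/*
 * Walk the kernel page tables down to the PTE mapping @vaddr; used below
 * to locate the pkmap and kmap fixmap PTEs, which are known to be mapped.
 */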
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

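/*
 * Choose the page protection for mapping physical memory (e.g. /dev/mem):
 * defer to the platform hook when one is registered, otherwise map
 * anything that is not RAM non-cached.
 */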
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
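/* Map a hot-added physical address to the NUMA node that should own it. */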
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

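/*
 * Weak defaults; platforms that can map and unmap sections of the linear
 * mapping override these.
 */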
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

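/*
 * Arch hook for memory hotplug: map the added range in the linear mapping,
 * flush stale cache lines for it, then hand the new pages to the core mm.
 */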
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
			      bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
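/*
 * Inverse of arch_add_memory(): release the pages from the core mm, then
 * tear down the linear mapping for the removed range.
 */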
int __meminit arch_remove_memory(int nid, u64 start, u64 size,
				 struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NEED_MULTIPLE_NODES
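/*
 * Flat (non-NUMA) topology setup: derive the pfn limits from memblock
 * and place all of memory on node 0.
 */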
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code. 32-bit-only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

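/*
 * mem_init() hands all free memory from memblock over to the buddy
 * allocator and logs the resulting kernel memory layout.
 */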
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

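/*
 * Free the kernel's __init sections once boot is complete, marking them
 * non-executable and poisoning them so stale references are caught.
 */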
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

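/*
 * Make the icache coherent with a page the kernel has written to, picking
 * a mapping strategy based on whether the page may live in highmem.
 */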
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

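/* Zero a page that is about to be mapped into userspace. */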
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

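/*
 * Flush the icache for part of a user page after the kernel has modified
 * it (e.g. when ptrace writes a breakpoint), via a temporary kernel map.
 */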
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held.
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);