Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
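
/*
 * Illustrative sketch (not part of the upstream file): how the levels
 * fold on 32-bit x86. With PAE a virtual address splits 2/9/9/12
 * across pgd/pmd/pte/offset; without PAE it splits 10/10/12 and the
 * pmd level is a pass-through, which is why the non-PAE path above
 * can simply return pmd_offset(pud, 0). Worked out by hand for a
 * hypothetical address:
 *
 *	vaddr = 0xC1234567
 *	PAE:	 pgd index = vaddr >> 30	   = 3
 *		 pmd index = (vaddr >> 21) & 0x1FF = 0x009
 *	non-PAE: pgd index = vaddr >> 22	   = 0x304 (772)
 *		 pmd index = 0 (level folded)
 */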

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
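
/*
 * Illustrative note (not part of the upstream file): the pmd entry
 * written above is just the physical address of the pte page OR'ed
 * with permission bits. _PAGE_TABLE expands to _PAGE_PRESENT |
 * _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY (0x067), so
 * for a pte page at physical address 0x01ab2000 the entry written
 * would be 0x01ab2000 | 0x067 = 0x01ab2067.
 */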

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}
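
/*
 * Worked example (illustrative, non-PAE geometry assumed): resolving
 * the pte for vaddr = 0xC1234567 through the two helpers above:
 *
 *	pgd_index(vaddr) = 0xC1234567 >> 22	      = 772
 *	pmd_index(vaddr) = 0 (pmd folded without PAE)
 *	pte_index(vaddr) = (0xC1234567 >> 12) & 0x3FF = 0x234 (564)
 *
 * populate_extra_pte() thus returns entry 564 of the pte page hanging
 * off swapper_pg_dir[772], allocating that pte page first if it is
 * not yet present.
 */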

static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to BUG().
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

/*
 * <linux/kallsyms.h> already defines is_kernel_text; the '__' prefix
 * avoids a conflict.
 */
static inline int __is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask,
			     pgprot_t prot)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL..) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a
	 *      way that would change, for any linear address, both the
	 *      page size and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (__is_kernel_text(addr) ||
				    __is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (__is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a local global-flush of the TLB, which will flush the
		 * previous mappings present in both the small and large
		 * page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}
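
/*
 * Worked example (illustrative; PAE geometry and PAGE_OFFSET of
 * 0xC0000000 assumed, so one pmd entry spans 2MB and PTRS_PER_PTE is
 * 512): mapping physical pfn 0x1000 (16MB) in the loop above gives
 *
 *	addr = 0x1000 * 4096 + 0xC0000000 = 0xC1000000
 *
 * With use_pse set, a single set_pmd() maps pfns 0x1000..0x11FF with
 * one 2MB entry and pfn advances by PTRS_PER_PTE; without PSE the
 * inner loop issues 512 separate set_pte() calls for the same range.
 * The first pass writes PTE_IDENT_ATTR attributes; only after
 * __flush_tlb_all() does the second pass install the final NX/GLOBAL
 * bits, so no TLB entry ever sees a simultaneous change of both page
 * size and frame/attributes.
 */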

pte_t *kmap_pte;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
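
/*
 * Illustrative note (not part of the upstream file): fixmap slots grow
 * downward from FIXADDR_TOP, one page per index:
 *
 *	__fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT)
 *
 * So if FIXADDR_TOP were 0xFFFFD000 and FIX_KMAP_BEGIN were 40 (both
 * hypothetical, config-dependent values), kmap_vstart above would be
 * 0xFFFFD000 - 40*4096 = 0xFFFD5000.
 */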

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
			unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
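
/*
 * Worked example (illustrative): PFN_UP()/PFN_DOWN() round a byte
 * range inward so only whole pages are released. For a free memblock
 * range [0x37FFF800, 0x40000400):
 *
 *	PFN_UP(0x37FFF800)   = 0x38000	(start rounded up)
 *	PFN_DOWN(0x40000400) = 0x40000	(end rounded down)
 *
 * and clamp_t() then trims that to the [start_pfn, end_pfn) window of
 * the node being initialized.
 */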
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too. It is used, for example,
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
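
/*
 * Illustrative sketch (values assume the common non-PAE 3G/1G split
 * with PAGE_OFFSET == 0xC0000000): clone_pgd_range() is a plain copy
 * of pgd entries, so the two calls above amount to
 *
 *	KERNEL_PGD_BOUNDARY = pgd_index(0xC0000000) = 768
 *	KERNEL_PGD_PTRS	    = 1024 - 768	    = 256
 *
 *	swapper_pg_dir[768..1023] -> initial_page_table[768..1023]
 *	swapper_pg_dir[768..1023] -> initial_page_table[0..255]
 *
 * The second copy re-creates the low identity map by aliasing the
 * kernel half of the address space at virtual address zero.
 */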

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to the
	 * VMALLOC_END definition, and max_low_pfn corresponds to
	 * VMALLOC_END's physical address. If the initial memory mapping
	 * did its job, the ptes near max_low_pfn are in use, or the
	 * containing pmd is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be a large page here */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
		       pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);
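
/*
 * Worked example (illustrative): memparse() accepts the usual K/M/G
 * suffixes, so booting with "highmem=512M" yields
 *
 *	memparse("512M", ...)	 = 0x20000000
 *	0x20000000 >> PAGE_SHIFT = 0x20000 pages (131072)
 *
 * which lowmem_pfn_init()/highmem_pfn_init() below sanity-check
 * against the RAM actually present.
 */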

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
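
/*
 * Worked example (illustrative): a 512MB box (max_pfn = 0x20000)
 * booted with "highmem=128M" (highmem_pages = 0x8000):
 *
 *	max_low_pfn = 0x20000 - 0x8000 = 0x18000  (384MB of lowmem)
 *
 * 384MB clears the 64MB lowmem floor, so the request is honoured.
 * With "highmem=480M" only 32MB of lowmem would remain, tripping the
 * MSG_LOWMEM_TOO_SMALL check, and the request would be ignored.
 */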

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
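
/*
 * Worked example (illustrative; assumes the common ~896MB lowmem
 * limit, i.e. MAXMEM_PFN = 0x38000): a 2GB box has max_pfn = 0x80000,
 * so with no highmem= parameter:
 *
 *	max_low_pfn   = 0x38000			    (896MB mapped directly)
 *	highmem_pages = 0x80000 - 0x38000 = 0x48000 (1152MB of highmem)
 *
 * The exact MAXMEM_PFN depends on CONFIG_VMSPLIT_* and the vmalloc
 * reservation.
 */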

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
	       pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
	       max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages
	 * has to be done before memblock_free_all(). Memblock uses free
	 * low memory for temporary data (see find_range_array()) and for
	 * this purpose can use pages that were already passed to the
	 * buddy allocator and hence are marked not accessible in the
	 * page tables when compiled with CONFIG_DEBUG_PAGEALLOC.
	 * Otherwise the order of initialization is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/*
	 * The page tables were already mapped at boot so if the caller
	 * requests a different mapping type then we must change all the
	 * pages with __set_memory_prot().
	 */
	if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) {
		ret = __set_memory_prot(start, nr_pages, params->pgprot);
		if (ret)
			return ret;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
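
/*
 * Worked example (illustrative): hot-adding a 128MB block at physical
 * address 0x40000000 through arch_add_memory() above gives
 *
 *	start_pfn = 0x40000000 >> 12 = 0x40000
 *	nr_pages  = 0x08000000 >> 12 = 0x8000
 *
 * and, if the caller asked for anything other than PAGE_KERNEL
 * protections, all 0x8000 pages are re-protected via
 * __set_memory_prot() before being added.
 */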

int kernel_set_to_readonly __read_mostly;

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and
	 * released, so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This matches __is_kernel_text()'s upper limit, rounded up to a
	 * full HPAGE in case large pages were used for the mapping:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_memory_nx(start, size >> PAGE_SHIFT);
}
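
/*
 * Worked example (illustrative, non-PAE where HPAGE_SIZE is 4MB):
 * with a hypothetical __init_end = 0xC08A5000 and start = 0xC0400000,
 *
 *	(0xC08A5000 + 0x400000) & ~0x3FFFFF = 0xC0C00000
 *	size = 0xC0C00000 - 0xC0400000	    = 0x800000	(8192k)
 *
 * i.e. the NX range is rounded out to the next large-page boundary
 * beyond __init_end, matching the granularity of a PSE mapping.
 */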

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = (unsigned long)__end_rodata - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	pr_info("Write protecting kernel text and read-only data: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	pr_info("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}