/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/boot.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The physical and virtual addresses of the start of the kernel image are
 * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
 * use section mapping with 4K (section size = 2M) but not with 16K (section
 * size = 32M) or 64K (section size = 512M).
 */
#if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
#define SWAPPER_SKIP_LEVEL	1
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_SKIP_LEVEL	0
#endif
#define SWAPPER_BLOCK_SIZE	(UL(1) << SWAPPER_BLOCK_SHIFT)

#define SWAPPER_PGTABLE_LEVELS		(CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
#define INIT_IDMAP_PGTABLE_LEVELS	(IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)

#define IDMAP_VA_BITS		48
#define IDMAP_LEVELS		ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
#define IDMAP_ROOT_LEVEL	(4 - IDMAP_LEVELS)

/*
 * A relocatable kernel may execute from an address that differs from the one
 * at which it was linked. In the worst case, its runtime placement may
 * intersect with two adjacent PGDIR entries, which means that an additional
 * page table may be needed at each subordinate level.
 */
#define EXTRA_PAGE	__is_defined(CONFIG_RELOCATABLE)

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

#define EARLY_ENTRIES(lvl, vstart, vend) \
	SPAN_NR_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * PTDESC_TABLE_SHIFT)

#define EARLY_LEVEL(lvl, lvls, vstart, vend, add)	\
	((lvls) > (lvl) ? EARLY_ENTRIES(lvl, vstart, vend) + (add) : 0)

#define EARLY_PAGES(lvls, vstart, vend, add) (1 /* PGDIR page */		\
	+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
	+ EARLY_LEVEL(2, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
	+ EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
				    + EARLY_SEGMENT_EXTRA_PAGES))

#define INIT_IDMAP_DIR_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, kimage_limit, 1))
#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)

#define INIT_IDMAP_FDT_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
#define INIT_IDMAP_FDT_SIZE	((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)

/* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
#define KERNEL_SEGMENT_COUNT	5

#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
#define EARLY_SEGMENT_EXTRA_PAGES	(KERNEL_SEGMENT_COUNT + 1)
/*
 * The initial ID map consists of the kernel image, mapped as two separate
 * segments, and may appear misaligned wrt the swapper block size. This means
 * we need 3 additional pages. The DT could straddle a swapper block boundary,
 * so it may need 2.
 */
#define EARLY_IDMAP_EXTRA_PAGES		3
#define EARLY_IDMAP_EXTRA_FDT_PAGES	2
#else
#define EARLY_SEGMENT_EXTRA_PAGES	0
#define EARLY_IDMAP_EXTRA_PAGES		0
#define EARLY_IDMAP_EXTRA_FDT_PAGES	0
#endif

#endif /* __ASM_KERNEL_PGTABLE_H */
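
/*
 * Worked example (editorial annotation, not part of the upstream header):
 * the arithmetic below is a sketch under one assumed configuration -- 4 KiB
 * pages with CONFIG_PGTABLE_LEVELS = 4 (48-bit VA), a per-level table shift
 * of 9 bits (PTDESC_TABLE_SHIFT), SEGMENT_ALIGN of 64 KiB, and a kernel image
 * whose link-time span crosses neither a 1 GiB nor a 512 GiB boundary.
 *
 * In that case PMD_SIZE (2 MiB) equals MIN_KIMG_ALIGN, so
 * SWAPPER_BLOCK_SHIFT = PMD_SHIFT = 21, SWAPPER_SKIP_LEVEL = 1 and
 * SWAPPER_PGTABLE_LEVELS = 4 - 1 = 3. EARLY_PAGES() then only counts levels
 * 1 and 2, using shifts 21 + 9 = 30 (1 GiB) and 21 + 18 = 39 (512 GiB);
 * level 3 is skipped because lvls (3) is not greater than 3. A span that
 * crosses no boundary covers one entry at each counted level, so for a
 * non-relocatable kernel (EXTRA_PAGE = 0):
 *
 *   EARLY_PAGES(3, KIMAGE_VADDR, _end, 0)
 *     = 1 (PGDIR) + 1 (next-level table at shift 39) + 1 (at shift 30) = 3
 *
 * SWAPPER_BLOCK_SIZE (2 MiB) exceeds the assumed SEGMENT_ALIGN (64 KiB), so
 * EARLY_SEGMENT_EXTRA_PAGES = KERNEL_SEGMENT_COUNT + 1 = 6 and
 *
 *   INIT_DIR_SIZE = PAGE_SIZE * (3 + 6) = 36 KiB
 *
 * With CONFIG_RELOCATABLE, EXTRA_PAGE = 1 adds one page at each counted
 * level to cover a runtime placement that straddles a boundary, giving
 * PAGE_SIZE * (5 + 6) = 44 KiB.
 */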