Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Do not initialise the fixmap page tables in head.S

The early_ioremap_init() function already handles fixmap pte
initialisation, so upgrade this to cover all of pud/pmd/pte and remove
one page from swapper_pg_dir.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>

+22 -19
+4 -4
arch/arm64/include/asm/page.h
··· 33 33 34 34 /* 35 35 * The idmap and swapper page tables need some space reserved in the kernel 36 - * image. The idmap only requires a pgd and a next level table to (section) map 37 - * the kernel, while the swapper also maps the FDT and requires an additional 38 - * table to map an early UART. See __create_page_tables for more information. 36 + * image. Both require a pgd and a next level table to (section) map the 37 + * kernel. The swapper also maps the FDT (see __create_page_tables for 38 + * more information). 39 39 */ 40 - #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) 40 + #define SWAPPER_DIR_SIZE (2 * PAGE_SIZE) 41 41 #define IDMAP_DIR_SIZE (2 * PAGE_SIZE) 42 42 43 43 #ifndef __ASSEMBLY__
-7
arch/arm64/kernel/head.S
··· 583 583 create_block_map x0, x7, x3, x5, x6 584 584 1: 585 585 /* 586 - * Create the pgd entry for the fixed mappings. 587 - */ 588 - ldr x5, =FIXADDR_TOP // Fixed mapping virtual address 589 - add x0, x26, #2 * PAGE_SIZE // section table address 590 - create_pgd_entry x26, x0, x5, x6, x7 591 - 592 - /* 593 586 * Since the page tables have been populated with non-cacheable 594 587 * accesses (MMU disabled), invalidate the idmap and swapper page 595 588 * tables again to remove any speculatively loaded cache lines.
+18 -8
arch/arm64/mm/ioremap.c
··· 103 103 } 104 104 EXPORT_SYMBOL(ioremap_cache); 105 105 106 - #ifndef CONFIG_ARM64_64K_PAGES 107 106 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; 107 + #ifndef CONFIG_ARM64_64K_PAGES 108 + static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; 108 109 #endif 109 110 110 - static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 111 + static inline pud_t * __init early_ioremap_pud(unsigned long addr) 111 112 { 112 113 pgd_t *pgd; 113 - pud_t *pud; 114 114 115 115 pgd = pgd_offset_k(addr); 116 116 BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); 117 117 118 - pud = pud_offset(pgd, addr); 118 + return pud_offset(pgd, addr); 119 + } 120 + 121 + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 122 + { 123 + pud_t *pud = early_ioremap_pud(addr); 124 + 119 125 BUG_ON(pud_none(*pud) || pud_bad(*pud)); 120 126 121 127 return pmd_offset(pud, addr); ··· 138 132 139 133 void __init early_ioremap_init(void) 140 134 { 135 + pgd_t *pgd; 136 + pud_t *pud; 141 137 pmd_t *pmd; 138 + unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN); 142 139 143 - pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 144 - #ifndef CONFIG_ARM64_64K_PAGES 145 - /* need to populate pmd for 4k pagesize only */ 140 + pgd = pgd_offset_k(addr); 141 + pud = pud_offset(pgd, addr); 142 + pud_populate(&init_mm, pud, bm_pmd); 143 + pmd = pmd_offset(pud, addr); 146 144 pmd_populate_kernel(&init_mm, pmd, bm_pte); 147 - #endif 145 + 148 146 /* 149 147 * The boot-ioremap range spans multiple pmds, for which 150 148 * we are not prepared: