Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 9104/2: Fix Keystone 2 kernel mapping regression

This fixes a Keystone 2 regression discovered as a side effect of
defining and passing the physical start/end sections of the kernel
to the MMU remapping code.

As the Keystone applies an offset to all physical addresses,
including those identified and patched by phys2virt, we fail to
account for this offset in the kernel_sec_start and kernel_sec_end
variables.

Further these offsets can extend into the 64bit range on LPAE
systems such as the Keystone 2.

Fix it like this:
- Extend kernel_sec_start and kernel_sec_end to be 64bit
- Add the offset also to kernel_sec_start and kernel_sec_end

As passing kernel_sec_start and kernel_sec_end as 64bit invariably
incurs BE8 endianness issues I have attempted to dry-code around
these.

Tested on the Vexpress QEMU model both with and without LPAE
enabled.

Fixes: 6e121df14ccd ("ARM: 9090/1: Map the lowmem and kernel separately")
Reported-by: Nishanth Menon <nmenon@kernel.org>
Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Tested-by: Nishanth Menon <nmenon@kernel.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

authored by

Linus Walleij and committed by
Russell King (Oracle)
463dbba4 e73f0f0e

+27 -8
+4 -3
arch/arm/include/asm/memory.h
··· 160 160 161 161 /* 162 162 * Physical start and end address of the kernel sections. These addresses are 163 - * 2MB-aligned to match the section mappings placed over the kernel. 163 + * 2MB-aligned to match the section mappings placed over the kernel. We use 164 + * u64 so that LPAE mappings beyond the 32bit limit will work out as well. 164 165 */ 165 - extern u32 kernel_sec_start; 166 - extern u32 kernel_sec_end; 166 + extern u64 kernel_sec_start; 167 + extern u64 kernel_sec_end; 167 168 168 169 /* 169 170 * Physical vs virtual RAM address space conversion. These are
+14 -3
arch/arm/kernel/head.S
··· 49 49 50 50 /* 51 51 * This needs to be assigned at runtime when the linker symbols are 52 - * resolved. 52 + * resolved. These are unsigned 64bit really, but in this assembly code 53 + * We store them as 32bit. 53 54 */ 54 55 .pushsection .data 55 56 .align 2 ··· 58 57 .globl kernel_sec_end 59 58 kernel_sec_start: 60 59 .long 0 60 + .long 0 61 61 kernel_sec_end: 62 + .long 0 62 63 .long 0 63 64 .popsection 64 65 ··· 253 250 add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 254 251 ldr r6, =(_end - 1) 255 252 adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) 256 - str r8, [r5] @ Save physical start of kernel 253 + #ifdef CONFIG_CPU_ENDIAN_BE8 254 + str r8, [r5, #4] @ Save physical start of kernel (BE) 255 + #else 256 + str r8, [r5] @ Save physical start of kernel (LE) 257 + #endif 257 258 orr r3, r8, r7 @ Add the MMU flags 258 259 add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 259 260 1: str r3, [r0], #1 << PMD_ORDER ··· 266 259 bls 1b 267 260 eor r3, r3, r7 @ Remove the MMU flags 268 261 adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) 269 - str r3, [r5] @ Save physical end of kernel 262 + #ifdef CONFIG_CPU_ENDIAN_BE8 263 + str r3, [r5, #4] @ Save physical end of kernel (BE) 264 + #else 265 + str r3, [r5] @ Save physical end of kernel (LE) 266 + #endif 270 267 271 268 #ifdef CONFIG_XIP_KERNEL 272 269 /*
+8 -1
arch/arm/mm/mmu.c
··· 1609 1609 return; 1610 1610 1611 1611 /* 1612 + * Offset the kernel section physical offsets so that the kernel 1613 + * mapping will work out later on. 1614 + */ 1615 + kernel_sec_start += offset; 1616 + kernel_sec_end += offset; 1617 + 1618 + /* 1612 1619 * Get the address of the remap function in the 1:1 identity 1613 1620 * mapping setup by the early page table assembly code. We 1614 1621 * must get this prior to the pv update. The following barrier ··· 1723 1716 { 1724 1717 void *zero_page; 1725 1718 1726 - pr_debug("physical kernel sections: 0x%08x-0x%08x\n", 1719 + pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n", 1727 1720 kernel_sec_start, kernel_sec_end); 1728 1721 1729 1722 prepare_page_table();
+1 -1
arch/arm/mm/pv-fixup-asm.S
··· 29 29 ldr r6, =(_end - 1) 30 30 add r7, r2, #0x1000 31 31 add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER 32 - add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER) 32 + add r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER) 33 33 1: ldrd r4, r5, [r7] 34 34 adds r4, r4, r0 35 35 adc r5, r5, r1