ARM: 9419/1: mm: Fix kernel memory mapping for xip kernels

The patchset introducing the kernel_sec_start/end variables to separate
the kernel/lowmem memory mappings broke the mapping of kernel memory for
XIP kernels.

For XIP kernels, the kernel_sec_start/end variables live in the read-only
(flash) region before the MMU is switched on, so they cannot be set early
in boot from head.S. Fix this by setting them after the MMU is switched on.
XIP kernels also need two different mappings: one for the kernel text
(starting at CONFIG_XIP_PHYS_ADDR) and one for the data (starting at
CONFIG_PHYS_OFFSET).
Also, move the kernel code mapping from devicemaps_init() to map_kernel().
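
To make the two mappings concrete, here is a minimal user-space sketch of
the arithmetic the XIP branch of map_kernel() performs (see the mmu.c hunks
below). All addresses, MODULES_VADDR, and the _exiprom/_end stand-ins are
hypothetical example values, not taken from any real platform.

/*
 * Sketch only: mirrors the XIP mapping arithmetic added to map_kernel().
 * Every constant below is a hypothetical stand-in for the real Kconfig
 * symbol or linker address of the same name.
 */
#include <stdio.h>

#define SECTION_SHIFT        20
#define SECTION_SIZE         (1UL << SECTION_SHIFT)   /* 1 MiB ARM section */
#define SECTION_MASK         (~(SECTION_SIZE - 1))

#define CONFIG_XIP_PHYS_ADDR 0x08000000UL   /* hypothetical: kernel text in flash */
#define CONFIG_PHYS_OFFSET   0x20000000UL   /* hypothetical: start of RAM */
#define MODULES_VADDR        0xbf000000UL   /* hypothetical: virtual base of XIP text */

int main(void)
{
	unsigned long exiprom = MODULES_VADDR + 0x280000UL;      /* hypothetical end of ROM text */
	unsigned long pa_end  = CONFIG_PHYS_OFFSET + 0x150000UL; /* hypothetical __pa(_end) */

	/* Mapping 1: kernel text, executed in place from flash (MT_ROM) */
	unsigned long rom_phys = CONFIG_XIP_PHYS_ADDR & SECTION_MASK;
	unsigned long rom_len  = (exiprom - MODULES_VADDR + ~SECTION_MASK) & SECTION_MASK;

	/* Mapping 2: kernel data/bss in RAM (kernel_sec_start/kernel_sec_end) */
	unsigned long ram_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
	unsigned long ram_end   = (pa_end + SECTION_SIZE - 1) & SECTION_MASK; /* round_up() */

	printf("ROM text: phys 0x%08lx -> virt 0x%08lx, len 0x%08lx\n",
	       rom_phys, MODULES_VADDR, rom_len);
	printf("RAM data: phys 0x%08lx-0x%08lx (mapped non-executable)\n",
	       ram_start, ram_end);
	return 0;
}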

Fixes: a91da5457085 ("ARM: 9089/1: Define kernel physical section start and end")
Signed-off-by: Harith George <harith.g@alifsemi.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

Authored by Harith G and committed by Russell King (Oracle) · ed6cbe6e 9852d85e

Changed files: +27 -15

arch/arm/kernel/head.S (+6 -2)

···
 	 */
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
+
+	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]		@ Save physical start of kernel (LE)
+#endif
 #endif
 	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
···
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
+#ifndef CONFIG_XIP_KERNEL
 	eor	r3, r3, r7		@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end	@ _pa(kernel_sec_end)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
···
 #else
 	str	r3, [r5]		@ Save physical end of kernel (LE)
 #endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
 	/*
 	 * Map the kernel image separately as it is not located in RAM.
 	 */
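
The hunks above compile the early kernel_sec_start/end stores out when
CONFIG_XIP_KERNEL is set; the values are instead seeded once the MMU is on,
in paging_init() (see the mmu.c hunk below). A minimal C sketch of that
split, with hypothetical function names and addresses standing in for the
head.S/mmu.c code paths (build with or without -DCONFIG_XIP_KERNEL):

/* Sketch only: early_head_s()/late_paging_init() are hypothetical stand-ins. */
#include <stdio.h>

static unsigned long kernel_sec_start, kernel_sec_end;

static void early_head_s(unsigned long phys_start, unsigned long phys_end)
{
#ifndef CONFIG_XIP_KERNEL
	/* !XIP: the variables live in writable RAM, record the bounds now */
	kernel_sec_start = phys_start;
	kernel_sec_end = phys_end;
#else
	/* XIP: they still sit in read-only flash at this point, so defer */
	(void)phys_start;
	(void)phys_end;
#endif
}

static void late_paging_init(unsigned long ram_start, unsigned long ram_end)
{
#ifdef CONFIG_XIP_KERNEL
	/* MMU is on and .data has been copied to RAM: seed the RW RAM bounds */
	kernel_sec_start = ram_start;
	kernel_sec_end = ram_end;
#else
	(void)ram_start;
	(void)ram_end;
#endif
}

int main(void)
{
	early_head_s(0x20000000UL, 0x20400000UL);     /* hypothetical bounds */
	late_paging_init(0x20000000UL, 0x20400000UL); /* hypothetical bounds */
	printf("kernel_sec: 0x%08lx-0x%08lx\n", kernel_sec_start, kernel_sec_end);
	return 0;
}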

arch/arm/mm/mmu.c (+21 -13)

···
 	}
 
 	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
-	/*
 	 * Map the cache flushing regions.
 	 */
 #ifdef FLUSH_BASE
···
 	 * This will only persist until we turn on proper memory management later on
 	 * and we remap the whole kernel with page granularity.
 	 */
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
 	phys_addr_t kernel_x_start = kernel_sec_start;
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 	phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
 	phys_addr_t kernel_nx_end = kernel_sec_end;
 	struct map_desc map;
 
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#else
 	map.pfn = __phys_to_pfn(kernel_x_start);
 	map.virtual = __phys_to_virt(kernel_x_start);
 	map.length = kernel_x_end - kernel_x_start;
···
 	/* If the nx part is small it may end up covered by the tail of the RWX section */
 	if (kernel_x_end == kernel_nx_end)
 		return;
-
+#endif
 	map.pfn = __phys_to_pfn(kernel_nx_start);
 	map.virtual = __phys_to_virt(kernel_nx_start);
 	map.length = kernel_nx_end - kernel_nx_start;
···
 {
 	void *zero_page;
 
+#ifdef CONFIG_XIP_KERNEL
+	/* Store the kernel RW RAM region start/end in these variables */
+	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
 	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
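
For comparison of the two configurations, a small sketch of how map_kernel()
now splits the RAM region, mirroring the hunk above: without XIP the text
and init sections stay executable and only the tail is mapped NX, while with
XIP the text runs from ROM and the whole RAM region is non-executable. The
addresses are hypothetical examples.

/* Sketch only: build with or without -DCONFIG_XIP_KERNEL. */
#include <stdio.h>

#define SECTION_SIZE 0x100000UL
#define ROUND_UP_SECTION(x) (((x) + SECTION_SIZE - 1) & ~(SECTION_SIZE - 1))

int main(void)
{
	unsigned long kernel_sec_start = 0x20000000UL; /* hypothetical RAM region start */
	unsigned long kernel_sec_end   = 0x20400000UL; /* hypothetical RAM region end   */
	unsigned long pa_init_end      = 0x20280000UL; /* hypothetical __pa(__init_end) */

#ifdef CONFIG_XIP_KERNEL
	/* XIP: text executes from ROM, so the whole RAM region is non-executable */
	unsigned long kernel_nx_start = kernel_sec_start;
	(void)pa_init_end;
#else
	/* !XIP: text + init remain executable; only the tail is mapped NX */
	unsigned long kernel_x_start = kernel_sec_start;
	unsigned long kernel_x_end   = ROUND_UP_SECTION(pa_init_end);
	unsigned long kernel_nx_start = kernel_x_end;

	printf("X  region: 0x%08lx-0x%08lx\n", kernel_x_start, kernel_x_end);
	/* If the NX part is empty it is already covered by the RWX tail */
	if (kernel_x_end == kernel_sec_end)
		return 0;
#endif
	printf("NX region: 0x%08lx-0x%08lx\n", kernel_nx_start, kernel_sec_end);
	return 0;
}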