Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: add MMU v3 support

MMUv3 comes out of reset with identity vaddr -> paddr mapping in the TLB
way 6:

Way 6 (512 MB)
Vaddr Paddr ASID Attr RWX Cache
---------- ---------- ---- ---- --- -------
0x00000000 0x00000000 0x01 0x03 RWX Bypass
0x20000000 0x20000000 0x01 0x03 RWX Bypass
0x40000000 0x40000000 0x01 0x03 RWX Bypass
0x60000000 0x60000000 0x01 0x03 RWX Bypass
0x80000000 0x80000000 0x01 0x03 RWX Bypass
0xa0000000 0xa0000000 0x01 0x03 RWX Bypass
0xc0000000 0xc0000000 0x01 0x03 RWX Bypass
0xe0000000 0xe0000000 0x01 0x03 RWX Bypass

This patch adds remapping code at the reset vector or at the kernel
_start (depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) that
reconfigures MMUv3 as MMUv2:

Way 5 (128 MB)
Vaddr Paddr ASID Attr RWX Cache
---------- ---------- ---- ---- --- -------
0xd0000000 0x00000000 0x01 0x07 RWX WB
0xd8000000 0x00000000 0x01 0x03 RWX Bypass
Way 6 (256 MB)
Vaddr Paddr ASID Attr RWX Cache
---------- ---------- ---- ---- --- -------
0xe0000000 0xf0000000 0x01 0x07 RWX WB
0xf0000000 0xf0000000 0x01 0x03 RWX Bypass

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

authored by

Max Filippov and committed by
Chris Zankel
e85e335f d83ff0bb

+451 -86
+29
arch/xtensa/Kconfig
··· 103 103 help 104 104 Can we use information of configuration file? 105 105 106 + config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 107 + bool "Initialize Xtensa MMU inside the Linux kernel code" 108 + default y 109 + help 110 + Earlier version initialized the MMU in the exception vector 111 + before jumping to _startup in head.S and had an advantage that 112 + it was possible to place a software breakpoint at 'reset' and 113 + then enter your normal kernel breakpoints once the MMU was mapped 114 + to the kernel mappings (0XC0000000). 115 + 116 + This unfortunately doesn't work for U-Boot and likely also won't 117 + work for using KEXEC to have a hot kernel ready for doing a 118 + KDUMP. 119 + 120 + So now the MMU is initialized in head.S but it's necessary to 121 + use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup. 122 + xt-gdb can't place a Software Breakpoint in the 0XD region prior 123 + to mapping the MMU and after mapping even if the area of low memory 124 + was mapped gdb wouldn't remove the breakpoint on hitting it as the 125 + PC wouldn't match. Since Hardware Breakpoints are recommended for 126 + Linux configurations it seems reasonable to just assume they exist 127 + and leave this older mechanism for unfortunate souls that choose 128 + not to follow Tensilica's recommendation. 129 + 130 + Selecting this will cause U-Boot to set the KERNEL Load and Entry 131 + address at 0x00003000 instead of the mapped std of 0xD0003000. 132 + 133 + If in doubt, say Y. 134 + 106 135 endmenu 107 136 108 137 config XTENSA_CALIBRATE_CCOUNT
+1
arch/xtensa/boot/boot-elf/Makefile
··· 12 12 13 13 export OBJCOPY_ARGS 14 14 export CPPFLAGS_boot.lds += -P -C 15 + export KBUILD_AFLAGS += -mtext-section-literals 15 16 16 17 boot-y := bootstrap.o 17 18
+26 -38
arch/xtensa/boot/boot-elf/boot.lds.S
··· 1 - #include <variant/core.h> 1 + /* 2 + * linux/arch/xtensa/boot/boot-elf/boot.lds.S 3 + * 4 + * Copyright (C) 2008 - 2013 by Tensilica Inc. 5 + * 6 + * Chris Zankel <chris@zankel.net> 7 + * Marc Gauthier <marc@tensilica.com 8 + * Pete Delaney <piet@tensilica.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + */ 14 + 15 + #include <asm/vectors.h> 2 16 OUTPUT_ARCH(xtensa) 3 17 ENTRY(_ResetVector) 4 18 5 19 SECTIONS 6 20 { 7 - .start 0xD0000000 : { *(.start) } 8 - 9 - .text 0xD0000000: 21 + .ResetVector.text XCHAL_RESET_VECTOR_VADDR : 10 22 { 11 - __reloc_start = . ; 12 - _text_start = . ; 13 - *(.literal .text.literal .text) 14 - _text_end = . ; 23 + *(.ResetVector.text) 15 24 } 16 25 17 - .rodata ALIGN(0x04): 18 - { 19 - *(.rodata) 20 - *(.rodata1) 21 - } 22 - 23 - .data ALIGN(0x04): 24 - { 25 - *(.data) 26 - *(.data1) 27 - *(.sdata) 28 - *(.sdata2) 29 - *(.got.plt) 30 - *(.got) 31 - *(.dynamic) 32 - } 33 - 34 - __reloc_end = . ; 35 - 36 - . = ALIGN(0x10); 37 - __image_load = . ; 38 - .image 0xd0001000: 26 + .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS) 39 27 { 40 28 _image_start = .; 41 29 *(image) 42 30 . = (. + 3) & ~ 3; 43 31 _image_end = . ; 44 32 } 45 - 46 33 47 34 .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3): 48 35 { ··· 40 53 *(.bss) 41 54 __bss_end = .; 42 55 } 43 - _end = .; 44 - _param_start = .; 45 56 46 - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : 57 + /* 58 + * This is a remapped copy of the Reset Vector Code. 59 + * It keeps gdb in sync with the PC after switching 60 + * to the temporary mapping used while setting up 61 + * the V2 MMU mappings for Linux. 62 + */ 63 + .ResetVector.remapped_text 0x46000000 (INFO): 47 64 { 48 - *(.ResetVector.text) 65 + *(.ResetVector.remapped_text) 49 66 } 50 - 51 - 52 - PROVIDE (end = .); 53 67 }
+86 -15
arch/xtensa/boot/boot-elf/bootstrap.S
··· 1 + /* 2 + * arch/xtensa/boot/boot-elf/bootstrap.S 3 + * 4 + * Low-level exception handling 5 + * 6 + * This file is subject to the terms and conditions of the GNU General Public 7 + * License. See the file "COPYING" in the main directory of this archive 8 + * for more details. 9 + * 10 + * Copyright (C) 2004 - 2013 by Tensilica Inc. 11 + * 12 + * Chris Zankel <chris@zankel.net> 13 + * Marc Gauthier <marc@tensilica.com> 14 + * Piet Delaney <piet@tensilica.com> 15 + */ 1 16 2 17 #include <asm/bootparam.h> 18 + #include <asm/processor.h> 19 + #include <asm/pgtable.h> 20 + #include <asm/page.h> 21 + #include <asm/cacheasm.h> 22 + #include <asm/initialize_mmu.h> 23 + #include <linux/linkage.h> 3 24 4 - 5 - /* ResetVector 6 - */ 7 - .section .ResetVector.text, "ax" 25 + .section .ResetVector.text, "ax" 8 26 .global _ResetVector 27 + .global reset 28 + 9 29 _ResetVector: 10 - _j reset 30 + _j _SetupMMU 31 + 32 + .begin no-absolute-literals 33 + .literal_position 34 + 11 35 .align 4 12 36 RomInitAddr: 13 - .word 0xd0001000 37 + #if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ 38 + XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY 39 + .word 0x00003000 40 + #else 41 + .word 0xd0003000 42 + #endif 14 43 RomBootParam: 15 44 .word _bootparam 45 + _bootparam: 46 + .short BP_TAG_FIRST 47 + .short 4 48 + .long BP_VERSION 49 + .short BP_TAG_LAST 50 + .short 0 51 + .long 0 52 + 53 + .align 4 54 + _SetupMMU: 55 + movi a0, 0 56 + wsr a0, windowbase 57 + rsync 58 + movi a0, 1 59 + wsr a0, windowstart 60 + rsync 61 + movi a0, 0x1F 62 + wsr a0, ps 63 + rsync 64 + 65 + Offset = _SetupMMU - _ResetVector 66 + 67 + #ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 68 + initialize_mmu 69 + #endif 70 + 71 + .end no-absolute-literals 72 + 73 + rsil a0, XCHAL_DEBUGLEVEL-1 74 + rsync 16 75 reset: 17 76 l32r a0, RomInitAddr 18 77 l32r a2, RomBootParam ··· 80 21 jx a0 81 22 82 23 .align 4 83 - .section .bootstrap.data, "aw" 84 24 85 - .globl _bootparam 86 - _bootparam: 87 - .short 
BP_TAG_FIRST 88 - .short 4 89 - .long BP_VERSION 90 - .short BP_TAG_LAST 91 - .short 0 92 - .long 0 25 + .section .ResetVector.remapped_text, "x" 26 + .global _RemappedResetVector 27 + 28 + /* Do org before literals */ 29 + .org 0 30 + 31 + _RemappedResetVector: 32 + .begin no-absolute-literals 33 + .literal_position 34 + 35 + _j _RemappedSetupMMU 36 + 37 + /* Position Remapped code at the same location as the original code */ 38 + . = _RemappedResetVector + Offset 39 + 40 + _RemappedSetupMMU: 41 + #ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 42 + initialize_mmu 43 + #endif 44 + 45 + .end no-absolute-literals
+5 -1
arch/xtensa/boot/boot-uboot/Makefile
··· 4 4 # for more details. 5 5 # 6 6 7 - UIMAGE_LOADADDR = 0xd0001000 7 + ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 8 + UIMAGE_LOADADDR = 0x00003000 9 + else 10 + UIMAGE_LOADADDR = 0xd0003000 11 + endif 8 12 UIMAGE_COMPRESSION = gzip 9 13 10 14 $(obj)/../uImage: vmlinux.bin.gz FORCE
+107
arch/xtensa/include/asm/initialize_mmu.h
··· 23 23 #ifndef _XTENSA_INITIALIZE_MMU_H 24 24 #define _XTENSA_INITIALIZE_MMU_H 25 25 26 + #include <asm/pgtable.h> 27 + #include <asm/vectors.h> 28 + 26 29 #ifdef __ASSEMBLY__ 27 30 28 31 #define XTENSA_HWVERSION_RC_2009_0 230000 ··· 50 47 #endif /* XCHAL_HAVE_S32C1I && 51 48 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) 52 49 */ 50 + 51 + #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY 52 + /* 53 + * Have MMU v3 54 + */ 55 + 56 + #if !XCHAL_HAVE_VECBASE 57 + # error "MMU v3 requires reloc vectors" 58 + #endif 59 + 60 + movi a1, 0 61 + _call0 1f 62 + _j 2f 63 + 64 + .align 4 65 + 1: movi a2, 0x10000000 66 + movi a3, 0x18000000 67 + add a2, a2, a0 68 + 9: bgeu a2, a3, 9b /* PC is out of the expected range */ 69 + 70 + /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ 71 + 72 + movi a2, 0x40000006 73 + idtlb a2 74 + iitlb a2 75 + isync 76 + 77 + /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code 78 + * and jump to the new mapping. 79 + */ 80 + #define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) 81 + #define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) 82 + 83 + srli a3, a0, 27 84 + slli a3, a3, 27 85 + addi a3, a3, CA_BYPASS 86 + addi a7, a2, -1 87 + wdtlb a3, a7 88 + witlb a3, a7 89 + isync 90 + 91 + slli a4, a0, 5 92 + srli a4, a4, 5 93 + addi a5, a2, -6 94 + add a4, a4, a5 95 + jx a4 96 + 97 + /* Step 3: unmap everything other than current area. 98 + * Start at 0x60000000, wrap around, and end with 0x20000000 99 + */ 100 + 2: movi a4, 0x20000000 101 + add a5, a2, a4 102 + 3: idtlb a5 103 + iitlb a5 104 + add a5, a5, a4 105 + bne a5, a2, 3b 106 + 107 + /* Step 4: Setup MMU with the old V2 mappings. 
*/ 108 + movi a6, 0x01000000 109 + wsr a6, ITLBCFG 110 + wsr a6, DTLBCFG 111 + isync 112 + 113 + movi a5, 0xd0000005 114 + movi a4, CA_WRITEBACK 115 + wdtlb a4, a5 116 + witlb a4, a5 117 + 118 + movi a5, 0xd8000005 119 + movi a4, CA_BYPASS 120 + wdtlb a4, a5 121 + witlb a4, a5 122 + 123 + movi a5, 0xe0000006 124 + movi a4, 0xf0000000 + CA_WRITEBACK 125 + wdtlb a4, a5 126 + witlb a4, a5 127 + 128 + movi a5, 0xf0000006 129 + movi a4, 0xf0000000 + CA_BYPASS 130 + wdtlb a4, a5 131 + witlb a4, a5 132 + 133 + isync 134 + 135 + /* Jump to self, using MMU v2 mappings. */ 136 + movi a4, 1f 137 + jx a4 138 + 139 + 1: 140 + movi a2, VECBASE_RESET_VADDR 141 + wsr a2, vecbase 142 + 143 + /* Step 5: remove temporary mapping. */ 144 + idtlb a7 145 + iitlb a7 146 + isync 147 + 148 + movi a0, 0 149 + wsr a0, ptevaddr 150 + rsync 151 + 152 + #endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && 153 + XCHAL_HAVE_SPANNING_WAY */ 53 154 54 155 .endm 55 156
+125
arch/xtensa/include/asm/vectors.h
··· 1 + /* 2 + * arch/xtensa/include/asm/xchal_vaddr_remap.h 3 + * 4 + * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual 5 + * Memory Addresses from "Virtual == Physical" to their previous V2 MMU 6 + * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000). 7 + * 8 + * This file is subject to the terms and conditions of the GNU General Public 9 + * License. See the file "COPYING" in the main directory of this archive 10 + * for more details. 11 + * 12 + * Copyright (C) 2008 - 2012 Tensilica Inc. 13 + * 14 + * Pete Delaney <piet@tensilica.com> 15 + * Marc Gauthier <marc@tensilica.com> 16 + */ 17 + 18 + #ifndef _XTENSA_VECTORS_H 19 + #define _XTENSA_VECTORS_H 20 + 21 + #include <variant/core.h> 22 + 23 + #if defined(CONFIG_MMU) 24 + 25 + /* Will Become VECBASE */ 26 + #define VIRTUAL_MEMORY_ADDRESS 0xD0000000 27 + 28 + /* Image Virtual Start Address */ 29 + #define KERNELOFFSET 0xD0003000 30 + 31 + #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY 32 + /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ 33 + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 34 + #define LOAD_MEMORY_ADDRESS 0x00003000 35 + #else 36 + /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ 37 + #define PHYSICAL_MEMORY_ADDRESS 0xD0000000 38 + #define LOAD_MEMORY_ADDRESS 0xD0003000 39 + #endif 40 + 41 + #else /* !defined(CONFIG_MMU) */ 42 + /* MMU Not being used - Virtual == Physical */ 43 + 44 + /* VECBASE */ 45 + #define VIRTUAL_MEMORY_ADDRESS 0x00002000 46 + 47 + /* Location of the start of the kernel text, _start */ 48 + #define KERNELOFFSET 0x00003000 49 + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 50 + 51 + /* Loaded just above possibly live vectors */ 52 + #define LOAD_MEMORY_ADDRESS 0x00003000 53 + 54 + #endif /* CONFIG_MMU */ 55 + 56 + #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) 57 + #define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset) 58 + 59 + /* Used to set VECBASE register */ 60 + #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS 61 + 62 + 
#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \ 63 + VECBASE_RESET_VADDR) 64 + #define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS) 65 + 66 + #define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \ 67 + VECBASE_RESET_VADDR) 68 + #define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) 69 + 70 + #if XCHAL_HAVE_VECBASE 71 + 72 + #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) 73 + #define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) 74 + #define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS) 75 + #define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS) 76 + #define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS) 77 + #define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS) 78 + #define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS) 79 + #define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS) 80 + #define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS) 81 + 82 + #define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) 83 + 84 + #undef XCHAL_NMI_VECTOR_VADDR 85 + #define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) 86 + 87 + #undef XCHAL_INTLEVEL7_VECTOR_VADDR 88 + #define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) 89 + 90 + /* 91 + * These XCHAL_* #defines from varian/core.h 92 + * are not valid to use with V3 MMU. Non-XCHAL 93 + * constants are defined above and should be used. 
94 + */ 95 + #undef XCHAL_VECBASE_RESET_VADDR 96 + #undef XCHAL_RESET_VECTOR0_VADDR 97 + #undef XCHAL_USER_VECTOR_VADDR 98 + #undef XCHAL_KERNEL_VECTOR_VADDR 99 + #undef XCHAL_DOUBLEEXC_VECTOR_VADDR 100 + #undef XCHAL_WINDOW_VECTORS_VADDR 101 + #undef XCHAL_INTLEVEL2_VECTOR_VADDR 102 + #undef XCHAL_INTLEVEL3_VECTOR_VADDR 103 + #undef XCHAL_INTLEVEL4_VECTOR_VADDR 104 + #undef XCHAL_INTLEVEL5_VECTOR_VADDR 105 + #undef XCHAL_INTLEVEL6_VECTOR_VADDR 106 + #undef XCHAL_DEBUG_VECTOR_VADDR 107 + #undef XCHAL_NMI_VECTOR_VADDR 108 + #undef XCHAL_INTLEVEL7_VECTOR_VADDR 109 + 110 + #else 111 + 112 + #define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR 113 + #define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR 114 + #define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR 115 + #define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR 116 + #define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR 117 + #define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR 118 + #define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR 119 + #define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR 120 + #define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR 121 + #define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR 122 + 123 + #endif 124 + 125 + #endif /* _XTENSA_VECTORS_H */
+2
arch/xtensa/kernel/Makefile
··· 12 12 obj-$(CONFIG_PCI) += pci.o 13 13 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o 14 14 15 + AFLAGS_head.o += -mtext-section-literals 16 + 15 17 # In the Xtensa architecture, assembly generates literals which must always 16 18 # precede the L32R instruction with a relative offset less than 256 kB. 17 19 # Therefore, the .text and .literal section must be combined in parenthesis
+27 -10
arch/xtensa/kernel/head.S
··· 48 48 */ 49 49 50 50 __HEAD 51 + .begin no-absolute-literals 52 + 51 53 ENTRY(_start) 52 54 53 - _j 2f 55 + /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 56 + wsr a2, excsave1 57 + _j _SetupMMU 58 + 54 59 .align 4 55 - 1: .word _startup 56 - 2: l32r a0, 1b 60 + .literal_position 61 + .Lstartup: 62 + .word _startup 63 + 64 + .align 4 65 + .global _SetupMMU 66 + _SetupMMU: 67 + Offset = _SetupMMU - _start 68 + 69 + #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX 70 + initialize_mmu 71 + #endif 72 + .end no-absolute-literals 73 + 74 + l32r a0, .Lstartup 57 75 jx a0 58 76 59 77 ENDPROC(_start) 60 78 61 - .section .init.text, "ax" 79 + __INIT 80 + .literal_position 62 81 63 82 ENTRY(_startup) 64 83 ··· 85 66 86 67 movi a0, LOCKLEVEL 87 68 wsr a0, ps 88 - 89 - /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 90 - 91 - wsr a2, excsave1 92 69 93 70 /* Start with a fresh windowbase and windowstart. */ 94 71 ··· 173 158 174 159 isync 175 160 176 - initialize_mmu 177 - 178 161 /* Unpack data sections 179 162 * 180 163 * The linker script used to build the Linux kernel image ··· 220 207 221 208 ___flush_dcache_all a2 a3 222 209 #endif 210 + memw 211 + isync 212 + ___invalidate_icache_all a2 a3 213 + isync 223 214 224 215 /* Setup stack and enable window exceptions (keep irqs disabled) */ 225 216
+2 -1
arch/xtensa/kernel/vectors.S
··· 50 50 #include <asm/processor.h> 51 51 #include <asm/page.h> 52 52 #include <asm/thread_info.h> 53 + #include <asm/vectors.h> 53 54 54 55 #define WINDOW_VECTORS_SIZE 0x180 55 56 ··· 221 220 222 221 xsr a0, depc # get DEPC, save a0 223 222 224 - movi a3, XCHAL_WINDOW_VECTORS_VADDR 223 + movi a3, WINDOW_VECTORS_VADDR 225 224 _bltu a0, a3, .Lfixup 226 225 addi a3, a3, WINDOW_VECTORS_SIZE 227 226 _bgeu a0, a3, .Lfixup
+32 -16
arch/xtensa/kernel/vmlinux.lds.S
··· 18 18 #include <asm/page.h> 19 19 #include <asm/thread_info.h> 20 20 21 + #include <asm/vectors.h> 21 22 #include <variant/core.h> 22 23 #include <platform/hardware.h> 23 24 OUTPUT_ARCH(xtensa) ··· 31 30 #endif 32 31 33 32 #ifndef KERNELOFFSET 34 - #define KERNELOFFSET 0xd0001000 33 + #define KERNELOFFSET 0xd0003000 35 34 #endif 36 35 37 36 /* Note: In the following macros, it would be nice to specify only the ··· 186 185 187 186 SECTION_VECTOR (_WindowVectors_text, 188 187 .WindowVectors.text, 189 - XCHAL_WINDOW_VECTORS_VADDR, 4, 188 + WINDOW_VECTORS_VADDR, 4, 190 189 .dummy) 191 190 SECTION_VECTOR (_DebugInterruptVector_literal, 192 191 .DebugInterruptVector.literal, 193 - XCHAL_DEBUG_VECTOR_VADDR - 4, 192 + DEBUG_VECTOR_VADDR - 4, 194 193 SIZEOF(.WindowVectors.text), 195 194 .WindowVectors.text) 196 195 SECTION_VECTOR (_DebugInterruptVector_text, 197 196 .DebugInterruptVector.text, 198 - XCHAL_DEBUG_VECTOR_VADDR, 197 + DEBUG_VECTOR_VADDR, 199 198 4, 200 199 .DebugInterruptVector.literal) 201 200 #undef LAST ··· 203 202 #if XCHAL_EXCM_LEVEL >= 2 204 203 SECTION_VECTOR (_Level2InterruptVector_text, 205 204 .Level2InterruptVector.text, 206 - XCHAL_INTLEVEL2_VECTOR_VADDR, 205 + INTLEVEL2_VECTOR_VADDR, 207 206 SIZEOF(LAST), LAST) 208 207 # undef LAST 209 208 # define LAST .Level2InterruptVector.text ··· 211 210 #if XCHAL_EXCM_LEVEL >= 3 212 211 SECTION_VECTOR (_Level3InterruptVector_text, 213 212 .Level3InterruptVector.text, 214 - XCHAL_INTLEVEL3_VECTOR_VADDR, 213 + INTLEVEL3_VECTOR_VADDR, 215 214 SIZEOF(LAST), LAST) 216 215 # undef LAST 217 216 # define LAST .Level3InterruptVector.text ··· 219 218 #if XCHAL_EXCM_LEVEL >= 4 220 219 SECTION_VECTOR (_Level4InterruptVector_text, 221 220 .Level4InterruptVector.text, 222 - XCHAL_INTLEVEL4_VECTOR_VADDR, 221 + INTLEVEL4_VECTOR_VADDR, 223 222 SIZEOF(LAST), LAST) 224 223 # undef LAST 225 224 # define LAST .Level4InterruptVector.text ··· 227 226 #if XCHAL_EXCM_LEVEL >= 5 228 227 SECTION_VECTOR (_Level5InterruptVector_text, 
229 228 .Level5InterruptVector.text, 230 - XCHAL_INTLEVEL5_VECTOR_VADDR, 229 + INTLEVEL5_VECTOR_VADDR, 231 230 SIZEOF(LAST), LAST) 232 231 # undef LAST 233 232 # define LAST .Level5InterruptVector.text ··· 235 234 #if XCHAL_EXCM_LEVEL >= 6 236 235 SECTION_VECTOR (_Level6InterruptVector_text, 237 236 .Level6InterruptVector.text, 238 - XCHAL_INTLEVEL6_VECTOR_VADDR, 237 + INTLEVEL6_VECTOR_VADDR, 239 238 SIZEOF(LAST), LAST) 240 239 # undef LAST 241 240 # define LAST .Level6InterruptVector.text 242 241 #endif 243 242 SECTION_VECTOR (_KernelExceptionVector_literal, 244 243 .KernelExceptionVector.literal, 245 - XCHAL_KERNEL_VECTOR_VADDR - 4, 244 + KERNEL_VECTOR_VADDR - 4, 246 245 SIZEOF(LAST), LAST) 247 246 #undef LAST 248 247 SECTION_VECTOR (_KernelExceptionVector_text, 249 248 .KernelExceptionVector.text, 250 - XCHAL_KERNEL_VECTOR_VADDR, 249 + KERNEL_VECTOR_VADDR, 251 250 4, 252 251 .KernelExceptionVector.literal) 253 252 SECTION_VECTOR (_UserExceptionVector_literal, 254 253 .UserExceptionVector.literal, 255 - XCHAL_USER_VECTOR_VADDR - 4, 254 + USER_VECTOR_VADDR - 4, 256 255 SIZEOF(.KernelExceptionVector.text), 257 256 .KernelExceptionVector.text) 258 257 SECTION_VECTOR (_UserExceptionVector_text, 259 258 .UserExceptionVector.text, 260 - XCHAL_USER_VECTOR_VADDR, 259 + USER_VECTOR_VADDR, 261 260 4, 262 261 .UserExceptionVector.literal) 263 262 SECTION_VECTOR (_DoubleExceptionVector_literal, 264 263 .DoubleExceptionVector.literal, 265 - XCHAL_DOUBLEEXC_VECTOR_VADDR - 16, 264 + DOUBLEEXC_VECTOR_VADDR - 16, 266 265 SIZEOF(.UserExceptionVector.text), 267 266 .UserExceptionVector.text) 268 267 SECTION_VECTOR (_DoubleExceptionVector_text, 269 268 .DoubleExceptionVector.text, 270 - XCHAL_DOUBLEEXC_VECTOR_VADDR, 269 + DOUBLEEXC_VECTOR_VADDR, 271 270 32, 272 271 .DoubleExceptionVector.literal) 273 272 ··· 285 284 . 
= ALIGN(0x10); 286 285 .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) } 287 286 288 - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : 287 + .ResetVector.text RESET_VECTOR_VADDR : 289 288 { 290 289 *(.ResetVector.text) 291 290 } 291 + 292 + 293 + /* 294 + * This is a remapped copy of the Secondary Reset Vector Code. 295 + * It keeps gdb in sync with the PC after switching 296 + * to the temporary mapping used while setting up 297 + * the V2 MMU mappings for Linux. 298 + * 299 + * Only debug information about this section is put in the kernel image. 300 + */ 301 + .SecondaryResetVector.remapped_text 0x46000000 (INFO): 302 + { 303 + *(.SecondaryResetVector.remapped_text) 304 + } 305 + 292 306 293 307 .xt.lit : { *(.xt.lit) } 294 308 .xt.prop : { *(.xt.prop) }
+9 -5
arch/xtensa/mm/mmu.c
··· 24 24 */ 25 25 void __init init_mmu(void) 26 26 { 27 - /* Writing zeros to the <t>TLBCFG special registers ensure 28 - * that valid values exist in the register. For existing 29 - * PGSZID<w> fields, zero selects the first element of the 30 - * page-size array. For nonexistent PGSZID<w> fields, zero is 31 - * the best value to write. Also, when changing PGSZID<w> 27 + #if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) 28 + /* 29 + * Writing zeros to the instruction and data TLBCFG special 30 + * registers ensure that valid values exist in the register. 31 + * 32 + * For existing PGSZID<w> fields, zero selects the first element 33 + * of the page-size array. For nonexistent PGSZID<w> fields, 34 + * zero is the best value to write. Also, when changing PGSZID<w> 32 35 * fields, the corresponding TLB must be flushed. 33 36 */ 34 37 set_itlbcfg_register(0); 35 38 set_dtlbcfg_register(0); 39 + #endif 36 40 flush_tlb_all(); 37 41 38 42 /* Set rasid register to a known value. */