Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/8xx: Fix vaddr for IMMR early remap

Memory: 124428K/131072K available (3748K kernel code, 188K rwdata,
648K rodata, 508K init, 290K bss, 6644K reserved)
Kernel virtual memory layout:
* 0xfffdf000..0xfffff000 : fixmap
* 0xfde00000..0xfe000000 : consistent mem
* 0xfddf6000..0xfde00000 : early ioremap
* 0xc9000000..0xfddf6000 : vmalloc & ioremap
SLUB: HWalign=16, Order=0-3, MinObjects=0, CPUs=1, Nodes=1

Today, IMMR is mapped 1:1 at startup.

Mapping IMMR 1:1 is just wrong because it may overlap with another
area. On most mpc8xx boards it is OK, as IMMR is set to 0xff000000,
but for instance on the EP88xC board, IMMR is at 0xfa200000, which
overlaps with the VM ioremap area.

This patch fixes the virtual address for remapping IMMR with the fixmap
regardless of the value of IMMR.

The size of the IMMR area is 256 kbytes (CPM at offset 0, security engine
at offset 128k), so a 512k page is enough.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>

Authored by Christophe Leroy; committed by Scott Wood.
f86ef74e c223c903

+41 -10
+7
arch/powerpc/include/asm/fixmap.h
@@ -51,6 +51,13 @@
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
+#ifdef CONFIG_PPC_8xx
+	/* For IMMR we need an aligned 512K area */
+#define FIX_IMMR_SIZE		(512 * 1024 / PAGE_SIZE)
+	FIX_IMMR_START,
+	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+			FIX_IMMR_SIZE,
+#endif
 	/* FIX_PCIE_MCFG, */
 	__end_of_fixed_addresses
 };
+3
arch/powerpc/include/asm/mmu-8xx.h
@@ -169,6 +169,9 @@
 	unsigned int active;
 	unsigned long vdso_base;
 } mm_context_t;
+
+#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC_4K_PAGES)
+8
arch/powerpc/kernel/asm-offsets.c
@@ -68,6 +68,10 @@
 #include "../mm/mmu_decl.h"
 #endif
 
+#ifdef CONFIG_PPC_8xx
+#include <asm/fixmap.h>
+#endif
+
 int main(void)
 {
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -748,6 +752,10 @@
 #endif
 
 	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+
+#ifdef CONFIG_PPC_8xx
+	DEFINE(VIRT_IMMR_BASE, __fix_to_virt(FIX_IMMR_BASE));
+#endif
 
 	return 0;
 }
+6 -5
arch/powerpc/kernel/head_8xx.S
@@ -30,6 +30,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/fixmap.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
@@ -763,7 +764,7 @@
  * virtual to physical. Also, set the cache mode since that is defined
  * by TLB entries and perform any additional mapping (like of the IMMR).
  * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
- * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
+ * 24 Mbytes of data, and the 512k IMMR space.  Anything not covered by
  * these mappings is mapped by page tables.
  */
 initial_mmu:
@@ -812,14 +813,14 @@
 	ori	r8, r8, MD_APG_INIT@l
 	mtspr	SPRN_MD_AP, r8
 
-	/* Map another 8 MByte at the IMMR to get the processor
+	/* Map a 512k page for the IMMR to get the processor
 	 * internal registers (among other things).
 	 */
 #ifdef CONFIG_PIN_TLB
 	mtspr	SPRN_MD_CTR, r10
 #endif
 	mfspr	r9, 638			/* Get current IMMR */
-	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */
+	andis.	r9, r9, 0xfff8		/* Get 512 kbytes boundary */
 
-	mr	r8, r9			/* Create vaddr for TLB */
+	lis	r8, VIRT_IMMR_BASE@h	/* Create vaddr for TLB */
 	ori	r8, r8, MD_EVALID	/* Mark it valid */
 	mtspr	SPRN_MD_EPN, r8
-	li	r8, MD_PS8MEG		/* Set 8M byte page */
+	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
 	ori	r8, r8, MD_SVALID	/* Make it valid */
 	mtspr	SPRN_MD_TWC, r8
 	mr	r8, r9			/* Create paddr for TLB */
+17 -5
arch/powerpc/sysdev/cpm_common.c
@@ -28,6 +28,7 @@
 #include <asm/udbg.h>
 #include <asm/io.h>
 #include <asm/cpm.h>
+#include <asm/fixmap.h>
 #include <soc/fsl/qe/qe.h>
 
 #include <mm/mmu_decl.h>
@@ -37,22 +38,33 @@
 #endif
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-static u32 __iomem *cpm_udbg_txdesc =
-	(u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+static u32 __iomem *cpm_udbg_txdesc;
+static u8 __iomem *cpm_udbg_txbuf;
 
 static void udbg_putc_cpm(char c)
 {
-	u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
-
 	if (c == '\n')
 		udbg_putc_cpm('\r');
 
 	while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
 		;
 
-	out_8(txbuf, c);
+	out_8(cpm_udbg_txbuf, c);
 	out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
 }
 
 void __init udbg_init_cpm(void)
 {
+#ifdef CONFIG_PPC_8xx
+	cpm_udbg_txdesc = (u32 __iomem __force *)
+			  (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
+			   VIRT_IMMR_BASE);
+	cpm_udbg_txbuf = (u8 __iomem __force *)
+			 (in_be32(&cpm_udbg_txdesc[1]) - PHYS_IMMR_BASE +
+			  VIRT_IMMR_BASE);
+#else
+	cpm_udbg_txdesc = (u32 __iomem __force *)
+			  CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+	cpm_udbg_txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
+#endif
+
 	if (cpm_udbg_txdesc) {
 #ifdef CONFIG_CPM2
 		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);