Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Add memory layout 2.5G(user):1.5G(kernel)

There are two ways for translating va to pa for csky:
- Use TLB (Translation Lookaside Buffer) and PTW (Page Table Walk)
- Use SSEG0/1 (Simple Segment Mapping)

We use TLB mapping for the 0-2G and 3G-4G virtual address areas, while
SSEG0/1 handle translation for 2G-2.5G and 2.5G-3G. We can disable SSEG0
to make 2G-2.5G available as TLB user mapping.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>

Guo Ren 0c8a32ee 7c53f6b6

+113 -51
+16
arch/csky/Kconfig
··· 192 192 endchoice 193 193 194 194 choice 195 + prompt "PAGE OFFSET" 196 + default PAGE_OFFSET_80000000 197 + 198 + config PAGE_OFFSET_80000000 199 + bool "PAGE OFFSET 2G (user:kernel = 2:2)" 200 + 201 + config PAGE_OFFSET_A0000000 202 + bool "PAGE OFFSET 2.5G (user:kernel = 2.5:1.5)" 203 + endchoice 204 + 205 + config PAGE_OFFSET 206 + hex 207 + default 0x80000000 if PAGE_OFFSET_80000000 208 + default 0xa0000000 if PAGE_OFFSET_A0000000 209 + choice 210 + 195 211 prompt "C-SKY PMU type" 196 212 depends on PERF_EVENTS 197 213 depends on CPU_CK807 || CPU_CK810 || CPU_CK860
+4 -4
arch/csky/abiv1/inc/abi/ckmmu.h
··· 89 89 cpwcr("cpcr8", 0x02000000); 90 90 } 91 91 92 - static inline void setup_pgd(unsigned long pgd, bool kernel) 92 + static inline void setup_pgd(pgd_t *pgd) 93 93 { 94 - cpwcr("cpcr29", pgd | BIT(0)); 94 + cpwcr("cpcr29", __pa(pgd) | BIT(0)); 95 95 } 96 96 97 - static inline unsigned long get_pgd(void) 97 + static inline pgd_t *get_pgd(void) 98 98 { 99 - return cprcr("cpcr29") & ~BIT(0); 99 + return __va(cprcr("cpcr29") & ~BIT(0)); 100 100 } 101 101 #endif /* __ASM_CSKY_CKMMUV1_H */
+7 -7
arch/csky/abiv2/inc/abi/ckmmu.h
··· 100 100 mtcr("cr<8, 15>", 0x02000000); 101 101 } 102 102 103 - static inline void setup_pgd(unsigned long pgd, bool kernel) 103 + static inline void setup_pgd(pgd_t *pgd) 104 104 { 105 - if (kernel) 106 - mtcr("cr<28, 15>", pgd | BIT(0)); 107 - else 108 - mtcr("cr<29, 15>", pgd | BIT(0)); 105 + #ifdef CONFIG_CPU_HAS_TLBI 106 + mtcr("cr<28, 15>", __pa(pgd) | BIT(0)); 107 + #endif 108 + mtcr("cr<29, 15>", __pa(pgd) | BIT(0)); 109 109 } 110 110 111 - static inline unsigned long get_pgd(void) 111 + static inline pgd_t *get_pgd(void) 112 112 { 113 - return mfcr("cr<29, 15>") & ~BIT(0); 113 + return __va(mfcr("cr<29, 15>") & ~BIT(0)); 114 114 } 115 115 #endif /* __ASM_CSKY_CKMMUV2_H */
+16 -3
arch/csky/abiv2/inc/abi/entry.h
··· 26 26 stw tls, (sp, 0) 27 27 stw lr, (sp, 4) 28 28 29 + RD_MEH lr 30 + WR_MEH lr 31 + 29 32 mfcr lr, epc 30 33 movi tls, \epc_inc 31 34 add lr, tls ··· 234 231 mtcr \rx, cr<8, 15> 235 232 .endm 236 233 234 + #ifdef CONFIG_PAGE_OFFSET_80000000 235 + #define MSA_SET cr<30, 15> 236 + #define MSA_CLR cr<31, 15> 237 + #endif 238 + 239 + #ifdef CONFIG_PAGE_OFFSET_A0000000 240 + #define MSA_SET cr<31, 15> 241 + #define MSA_CLR cr<30, 15> 242 + #endif 243 + 237 244 .macro SETUP_MMU 238 245 /* Init psr and enable ee */ 239 246 lrw r6, DEFAULT_PSR_VALUE ··· 294 281 * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 295 282 * BA Reserved SH WA B SO SEC C D V 296 283 */ 297 - mfcr r6, cr<30, 15> /* Get MSA0 */ 284 + mfcr r6, MSA_SET /* Get MSA */ 298 285 2: 299 286 lsri r6, 29 300 287 lsli r6, 29 301 288 addi r6, 0x1ce 302 - mtcr r6, cr<30, 15> /* Set MSA0 */ 289 + mtcr r6, MSA_SET /* Set MSA */ 303 290 304 291 movi r6, 0 305 - mtcr r6, cr<31, 15> /* Clr MSA1 */ 292 + mtcr r6, MSA_CLR /* Clr MSA */ 306 293 307 294 /* enable MMU */ 308 295 mfcr r6, cr18
+1 -1
arch/csky/include/asm/memory.h
··· 10 10 11 11 #define FIXADDR_TOP _AC(0xffffc000, UL) 12 12 #define PKMAP_BASE _AC(0xff800000, UL) 13 - #define VMALLOC_START _AC(0xc0008000, UL) 13 + #define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8)) 14 14 #define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) 15 15 16 16 #ifdef CONFIG_HAVE_TCM
+1 -7
arch/csky/include/asm/mmu_context.h
··· 14 14 #include <linux/sched.h> 15 15 #include <abi/ckmmu.h> 16 16 17 - #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ 18 - setup_pgd(__pa(pgd), false) 19 - 20 - #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ 21 - setup_pgd(__pa(pgd), true) 22 - 23 17 #define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1) 24 18 #define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK) 25 19 ··· 30 36 if (prev != next) 31 37 check_and_switch_context(next, cpu); 32 38 33 - TLBMISS_HANDLER_SETUP_PGD(next->pgd); 39 + setup_pgd(next->pgd); 34 40 write_mmu_entryhi(next->context.asid.counter); 35 41 36 42 flush_icache_deferred(next);
+1 -1
arch/csky/include/asm/page.h
··· 24 24 * address region. We use them mapping kernel 1GB direct-map address area and 25 25 * for more than 1GB of memory we use highmem. 26 26 */ 27 - #define PAGE_OFFSET 0x80000000 27 + #define PAGE_OFFSET CONFIG_PAGE_OFFSET 28 28 #define SSEG_SIZE 0x20000000 29 29 #define LOWMEM_LIMIT (SSEG_SIZE * 2) 30 30
+1 -1
arch/csky/include/asm/pgalloc.h
··· 71 71 } while (0) 72 72 73 73 extern void pagetable_init(void); 74 - extern void pre_mmu_init(void); 74 + extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn); 75 75 extern void pre_trap_init(void); 76 76 77 77 #endif /* __ASM_CSKY_PGALLOC_H */
+1 -1
arch/csky/include/asm/pgtable.h
··· 14 14 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 15 15 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 16 16 17 - #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 17 + #define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE) 18 18 #define FIRST_USER_ADDRESS 0UL 19 19 20 20 /*
+1 -1
arch/csky/include/asm/processor.h
··· 28 28 * for a 64 bit kernel expandable to 8192EB, of which the current CSKY 29 29 * implementations will "only" be able to use 1TB ... 30 30 */ 31 - #define TASK_SIZE 0x7fff8000UL 31 + #define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8)) 32 32 33 33 #ifdef __KERNEL__ 34 34 #define STACK_TOP TASK_SIZE
+1 -1
arch/csky/include/asm/segment.h
··· 10 10 11 11 #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) 12 12 13 - #define USER_DS ((mm_segment_t) { 0x80000000UL }) 13 + #define USER_DS ((mm_segment_t) { PAGE_OFFSET }) 14 14 #define get_fs() (current_thread_info()->addr_limit) 15 15 #define set_fs(x) (current_thread_info()->addr_limit = (x)) 16 16 #define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
+4
arch/csky/kernel/atomic.S
··· 14 14 */ 15 15 ENTRY(csky_cmpxchg) 16 16 USPTOKSP 17 + 18 + RD_MEH a3 19 + WR_MEH a3 20 + 17 21 mfcr a3, epc 18 22 addi a3, TRAP0_SIZE 19 23
+8 -2
arch/csky/kernel/entry.S
··· 49 49 50 50 RD_PGDR r6 51 51 RD_MEH a3 52 + WR_MEH a3 52 53 #ifdef CONFIG_CPU_HAS_TLBI 53 54 tlbi.vaas a3 54 55 sync.is ··· 65 64 WR_MCIR a2 66 65 #endif 67 66 bclri r6, 0 67 + lrw a2, PAGE_OFFSET 68 + add r6, a2 68 69 lrw a2, va_pa_offset 69 70 ld.w a2, (a2, 0) 70 71 subu r6, a2 71 - bseti r6, 31 72 72 73 73 mov a2, a3 74 74 lsri a2, _PGDIR_SHIFT ··· 77 75 addu r6, a2 78 76 ldw r6, (r6) 79 77 78 + lrw a2, PAGE_OFFSET 79 + add r6, a2 80 80 lrw a2, va_pa_offset 81 81 ld.w a2, (a2, 0) 82 82 subu r6, a2 83 - bseti r6, 31 84 83 85 84 lsri a3, PTE_INDX_SHIFT 86 85 lrw a2, PTE_INDX_MSK ··· 316 313  */ 317 314 ENTRY(csky_get_tls) 318 315 USPTOKSP 316 + 317 + RD_MEH a0 318 + WR_MEH a0 319 319 320 320 /* increase epc for continue */ 321 321 mfcr a0, epc
+8 -2
arch/csky/kernel/head.S
··· 21 21 ENTRY(_start_smp_secondary) 22 22 SETUP_MMU 23 23 24 - /* copy msa1 from CPU0 */ 25 - lrw r6, secondary_msa1 24 + #ifdef CONFIG_PAGE_OFFSET_80000000 25 + lrw r6, secondary_msa1 26 26 ld.w r6, (r6, 0) 27 27 mtcr r6, cr<31, 15> 28 + #endif 29 + 30 + lrw r6, secondary_pgd 31 + ld.w r6, (r6, 0) 32 + mtcr r6, cr<28, 15> 33 + mtcr r6, cr<29, 15> 28 34 29 35 /* set stack point */ 30 36 lrw r6, secondary_stack
+16 -2
arch/csky/kernel/setup.c
··· 45 45 46 46 if (size >= lowmem_size) { 47 47 max_low_pfn = min_low_pfn + lowmem_size; 48 + #ifdef CONFIG_PAGE_OFFSET_80000000 48 49 write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); 50 + #endif 49 51 } else if (size > sseg_size) { 50 52 max_low_pfn = min_low_pfn + sseg_size; 51 53 } 52 54 53 55 max_zone_pfn[ZONE_NORMAL] = max_low_pfn; 56 + 57 + mmu_init(min_low_pfn, max_low_pfn); 54 58 55 59 #ifdef CONFIG_HIGHMEM 56 60 max_zone_pfn[ZONE_HIGHMEM] = max_pfn; ··· 105 101 unsigned long va_pa_offset; 106 102 EXPORT_SYMBOL(va_pa_offset); 107 103 104 + static inline unsigned long read_mmu_msa(void) 105 + { 106 + #ifdef CONFIG_PAGE_OFFSET_80000000 107 + return read_mmu_msa0(); 108 + #endif 109 + 110 + #ifdef CONFIG_PAGE_OFFSET_A0000000 111 + return read_mmu_msa1(); 112 + #endif 113 + } 114 + 108 115 asmlinkage __visible void __init csky_start(unsigned int unused, 109 116 void *dtb_start) 110 117 { 111 118 /* Clean up bss section */ 112 119 memset(__bss_start, 0, __bss_stop - __bss_start); 113 120 114 - va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1); 121 + va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1); 115 122 116 123 pre_trap_init(); 117 - pre_mmu_init(); 118 124 119 125 if (dtb_start == NULL) 120 126 early_init_dt_scan(__dtb_start);
+3 -4
arch/csky/kernel/smp.c
··· 203 203 volatile unsigned int secondary_hint2; 204 204 volatile unsigned int secondary_ccr; 205 205 volatile unsigned int secondary_stack; 206 - 207 - unsigned long secondary_msa1; 206 + volatile unsigned int secondary_msa1; 207 + volatile unsigned int secondary_pgd; 208 208 209 209 int __cpu_up(unsigned int cpu, struct task_struct *tidle) 210 210 { ··· 216 216 secondary_hint2 = mfcr("cr<21, 1>"); 217 217 secondary_ccr = mfcr("cr18"); 218 218 secondary_msa1 = read_mmu_msa1(); 219 + secondary_pgd = mfcr("cr<29, 15>"); 219 220 220 221 /* 221 222 * Because other CPUs are in reset status, we must flush data ··· 263 262 264 263 flush_tlb_all(); 265 264 write_mmu_pagemask(0); 266 - TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); 267 - TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); 268 265 269 266 #ifdef CONFIG_CPU_HAS_FPU 270 267 init_fpu();
+1 -1
arch/csky/kernel/vmlinux.lds.S
··· 33 33 34 34 .text : AT(ADDR(.text) - LOAD_OFFSET) { 35 35 _text = .; 36 + VBR_BASE 36 37 IRQENTRY_TEXT 37 38 SOFTIRQENTRY_TEXT 38 39 TEXT_TEXT ··· 105 104 106 105 EXCEPTION_TABLE(L1_CACHE_BYTES) 107 106 BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) 108 - VBR_BASE 109 107 _end = . ; 110 108 111 109 STABS_DEBUG
+1 -6
arch/csky/mm/fault.c
··· 59 59 60 60 si_code = SEGV_MAPERR; 61 61 62 - #ifndef CONFIG_CPU_HAS_TLBI 63 62 /* 64 63 * We fault-in kernel-space virtual memory on-demand. The 65 64 * 'reference' page table is init_mm.pgd. ··· 83 84 pmd_t *pmd, *pmd_k; 84 85 pte_t *pte_k; 85 86 86 - unsigned long pgd_base; 87 - 88 - pgd_base = (unsigned long)__va(get_pgd()); 89 - pgd = (pgd_t *)pgd_base + offset; 87 + pgd = get_pgd() + offset; 90 88 pgd_k = init_mm.pgd + offset; 91 89 92 90 if (!pgd_present(*pgd_k)) ··· 106 110 goto no_context; 107 111 return; 108 112 } 109 - #endif 110 113 111 114 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 112 115 /*
+22 -7
arch/csky/mm/init.c
··· 28 28 #include <asm/mmu_context.h> 29 29 #include <asm/sections.h> 30 30 #include <asm/tlb.h> 31 + #include <asm/cacheflush.h> 31 32 32 33 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 33 34 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; 35 + pte_t kernel_pte_tables[(PTRS_PER_PGD - USER_PTRS_PER_PGD)*PTRS_PER_PTE] __page_aligned_bss; 36 + 34 37 EXPORT_SYMBOL(invalid_pte_table); 35 38 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] 36 39 __page_aligned_bss; ··· 133 130 134 131 for (i = 0; i < PTRS_PER_PGD; i++) 135 132 p[i] = __pa(invalid_pte_table); 133 + 134 + flush_tlb_all(); 135 + local_icache_inv_all(NULL); 136 136 } 137 137 138 - void __init pre_mmu_init(void) 138 + void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn) 139 139 { 140 - /* 141 - * Setup page-table and enable TLB-hardrefill 142 - */ 140 + int i; 141 + 142 + for (i = 0; i < USER_PTRS_PER_PGD; i++) 143 + swapper_pg_dir[i].pgd = __pa(invalid_pte_table); 144 + 145 + for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) 146 + swapper_pg_dir[i].pgd = 147 + __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD))); 148 + 149 + for (i = min_pfn; i < max_pfn; i++) 150 + set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL)); 151 + 143 152 flush_tlb_all(); 144 - pgd_init((unsigned long *)swapper_pg_dir); 145 - TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); 146 - TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); 153 + local_icache_inv_all(NULL); 147 154 148 155 /* Setup page mask to 4k */ 149 156 write_mmu_pagemask(0); 157 + 158 + setup_pgd(swapper_pg_dir); 150 159 } 151 160 152 161 void __init fixrange_init(unsigned long start, unsigned long end,