Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Remove prologue of page fault handler in entry.S

There is a prologue in the page fault handler which marks pages dirty
and/or accessed in the page attributes, but all of this is already
handled in handle_pte_fault.

- Add flush_tlb_one in the vmalloc page fault path instead of in the prologue.
- Use the cmpxchg_fixup C code in do_page_fault instead of the ASM one.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>

Guo Ren b0ae5e26 7b513cf2

+48 -131
+2 -18
arch/csky/kernel/atomic.S
··· 40 40 2: 41 41 sync.is 42 42 #else 43 - 1: 43 + GLOBAL(csky_cmpxchg_ldw) 44 44 ldw a3, (a2) 45 45 cmpne a0, a3 46 46 bt16 3f 47 - 2: 47 + GLOBAL(csky_cmpxchg_stw) 48 48 stw a1, (a2) 49 49 3: 50 50 #endif ··· 59 59 KSPTOUSP 60 60 rte 61 61 END(csky_cmpxchg) 62 - 63 - #ifndef CONFIG_CPU_HAS_LDSTEX 64 - /* 65 - * Called from tlbmodified exception 66 - */ 67 - ENTRY(csky_cmpxchg_fixup) 68 - mfcr a0, epc 69 - lrw a1, 2b 70 - cmpne a1, a0 71 - bt 1f 72 - subi a1, (2b - 1b) 73 - stw a1, (sp, LSAVE_PC) 74 - 1: 75 - rts 76 - END(csky_cmpxchg_fixup) 77 - #endif
+2 -102
arch/csky/kernel/entry.S
··· 13 13 #include <asm/page.h> 14 14 #include <asm/thread_info.h> 15 15 16 - #define PTE_INDX_MSK 0xffc 17 - #define PTE_INDX_SHIFT 10 18 - #define _PGDIR_SHIFT 22 19 - 20 16 .macro zero_fp 21 17 #ifdef CONFIG_STACKTRACE 22 18 movi r8, 0 ··· 37 41 #endif 38 42 .endm 39 43 40 - .macro tlbop_begin name, val0, val1, val2 41 - ENTRY(csky_\name) 42 - mtcr a3, ss2 43 - mtcr r6, ss3 44 - mtcr a2, ss4 45 - 46 - RD_PGDR r6 47 - RD_MEH a3 48 - WR_MEH a3 49 - #ifdef CONFIG_CPU_HAS_TLBI 50 - tlbi.vaas a3 51 - sync.is 52 - 53 - btsti a3, 31 54 - bf 1f 55 - RD_PGDR_K r6 56 - 1: 57 - #else 58 - bgeni a2, 31 59 - WR_MCIR a2 60 - bgeni a2, 25 61 - WR_MCIR a2 62 - #endif 63 - bclri r6, 0 64 - lrw a2, PAGE_OFFSET 65 - add r6, a2 66 - lrw a2, va_pa_offset 67 - ld.w a2, (a2, 0) 68 - subu r6, a2 69 - 70 - mov a2, a3 71 - lsri a2, _PGDIR_SHIFT 72 - lsli a2, 2 73 - addu r6, a2 74 - ldw r6, (r6) 75 - 76 - lrw a2, PAGE_OFFSET 77 - add r6, a2 78 - lrw a2, va_pa_offset 79 - ld.w a2, (a2, 0) 80 - subu r6, a2 81 - 82 - lsri a3, PTE_INDX_SHIFT 83 - lrw a2, PTE_INDX_MSK 84 - and a3, a2 85 - addu r6, a3 86 - ldw a3, (r6) 87 - 88 - movi a2, (_PAGE_PRESENT | \val0) 89 - and a3, a2 90 - cmpne a3, a2 91 - bt \name 92 - 93 - /* First read/write the page, just update the flags */ 94 - ldw a3, (r6) 95 - bgeni a2, PAGE_VALID_BIT 96 - bseti a2, PAGE_ACCESSED_BIT 97 - bseti a2, \val1 98 - bseti a2, \val2 99 - or a3, a2 100 - stw a3, (r6) 101 - 102 - /* Some cpu tlb-hardrefill bypass the cache */ 103 - #ifdef CONFIG_CPU_NEED_TLBSYNC 104 - movi a2, 0x22 105 - bseti a2, 6 106 - mtcr r6, cr22 107 - mtcr a2, cr17 108 - sync 109 - #endif 110 - 111 - mfcr a3, ss2 112 - mfcr r6, ss3 113 - mfcr a2, ss4 114 - rte 115 - \name: 116 - mfcr a3, ss2 117 - mfcr r6, ss3 118 - mfcr a2, ss4 44 + .text 45 + ENTRY(csky_pagefault) 119 46 SAVE_ALL 0 120 - .endm 121 - .macro tlbop_end is_write 122 47 zero_fp 123 48 context_tracking 124 - RD_MEH a2 125 49 psrset ee, ie 126 50 mov a0, sp 127 - movi a1, \is_write 128 51 jbsr 
do_page_fault 129 52 jmpi ret_from_exception 130 - .endm 131 - 132 - .text 133 - 134 - tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT 135 - tlbop_end 0 136 - 137 - tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT 138 - tlbop_end 1 139 - 140 - tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT 141 - #ifndef CONFIG_CPU_HAS_LDSTEX 142 - jbsr csky_cmpxchg_fixup 143 - #endif 144 - tlbop_end 1 145 53 146 54 ENTRY(csky_systemcall) 147 55 SAVE_ALL TRAP0_SIZE
+4 -6
arch/csky/kernel/traps.c
··· 39 39 asmlinkage void csky_get_tls(void); 40 40 asmlinkage void csky_irq(void); 41 41 42 - asmlinkage void csky_tlbinvalidl(void); 43 - asmlinkage void csky_tlbinvalids(void); 44 - asmlinkage void csky_tlbmodified(void); 42 + asmlinkage void csky_pagefault(void); 45 43 46 44 /* Defined in head.S */ 47 45 asmlinkage void _start_smp_secondary(void); ··· 64 66 VEC_INIT(VEC_TRAP3, csky_get_tls); 65 67 66 68 /* setup MMU TLB exception */ 67 - VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl); 68 - VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids); 69 - VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified); 69 + VEC_INIT(VEC_TLBINVALIDL, csky_pagefault); 70 + VEC_INIT(VEC_TLBINVALIDS, csky_pagefault); 71 + VEC_INIT(VEC_TLBMODIFIED, csky_pagefault); 70 72 71 73 #ifdef CONFIG_CPU_HAS_FPU 72 74 init_fpu();
+40 -5
arch/csky/mm/fault.c
··· 39 39 return 0; 40 40 } 41 41 42 + static inline bool is_write(struct pt_regs *regs) 43 + { 44 + switch (trap_no(regs)) { 45 + case VEC_TLBINVALIDS: 46 + return true; 47 + case VEC_TLBMODIFIED: 48 + return true; 49 + } 50 + 51 + return false; 52 + } 53 + 54 + #ifdef CONFIG_CPU_HAS_LDSTEX 55 + static inline void csky_cmpxchg_fixup(struct pt_regs *regs) 56 + { 57 + return; 58 + } 59 + #else 60 + extern unsigned long csky_cmpxchg_ldw; 61 + extern unsigned long csky_cmpxchg_stw; 62 + static inline void csky_cmpxchg_fixup(struct pt_regs *regs) 63 + { 64 + if (trap_no(regs) != VEC_TLBMODIFIED) 65 + return; 66 + 67 + if (instruction_pointer(regs) == csky_cmpxchg_stw) 68 + instruction_pointer_set(regs, csky_cmpxchg_ldw); 69 + return; 70 + } 71 + #endif 72 + 42 73 /* 43 74 * This routine handles page faults. It determines the address, 44 75 * and the problem, and then passes it off to one of the appropriate 45 76 * routines. 46 77 */ 47 - asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, 48 - unsigned long mmu_meh) 78 + asmlinkage void do_page_fault(struct pt_regs *regs) 49 79 { 50 80 struct vm_area_struct *vma = NULL; 51 81 struct task_struct *tsk = current; 52 82 struct mm_struct *mm = tsk->mm; 53 83 int si_code; 54 84 int fault; 55 - unsigned long address = mmu_meh & PAGE_MASK; 85 + unsigned long address = read_mmu_entryhi() & PAGE_MASK; 86 + 87 + csky_cmpxchg_fixup(regs); 56 88 57 89 if (kprobe_page_fault(regs, tsk->thread.trap_no)) 58 90 return; ··· 136 104 pte_k = pte_offset_kernel(pmd_k, address); 137 105 if (!pte_present(*pte_k)) 138 106 goto no_context; 107 + 108 + flush_tlb_one(address); 109 + 139 110 return; 140 111 } 141 112 ··· 167 132 good_area: 168 133 si_code = SEGV_ACCERR; 169 134 170 - if (write) { 135 + if (is_write(regs)) { 171 136 if (!(vma->vm_flags & VM_WRITE)) 172 137 goto bad_area; 173 138 } else { ··· 180 145 * make sure we exit gracefully rather than endlessly redo 181 146 * the fault. 
182 147 */ 183 - fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0, 148 + fault = handle_mm_fault(vma, address, is_write(regs) ? FAULT_FLAG_WRITE : 0, 184 149 regs); 185 150 if (unlikely(fault & VM_FAULT_ERROR)) { 186 151 if (fault & VM_FAULT_OOM)