···11+// SPDX-License-Identifier: GPL-2.022+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#include <linux/sched.h>55+#include <linux/signal.h>66+#include <linux/kernel.h>77+#include <linux/mm.h>88+#include <linux/module.h>99+#include <linux/user.h>1010+#include <linux/string.h>1111+#include <linux/linkage.h>1212+#include <linux/init.h>1313+#include <linux/ptrace.h>1414+#include <linux/kallsyms.h>1515+#include <linux/rtc.h>1616+#include <linux/uaccess.h>1717+1818+#include <asm/setup.h>1919+#include <asm/traps.h>2020+#include <asm/pgalloc.h>2121+#include <asm/siginfo.h>2222+2323+#include <asm/mmu_context.h>2424+2525+#ifdef CONFIG_CPU_HAS_FPU2626+#include <abi/fpu.h>2727+#endif2828+2929+/* Defined in entry.S */3030+asmlinkage void csky_trap(void);3131+3232+asmlinkage void csky_systemcall(void);3333+asmlinkage void csky_cmpxchg(void);3434+asmlinkage void csky_get_tls(void);3535+asmlinkage void csky_irq(void);3636+3737+asmlinkage void csky_tlbinvalidl(void);3838+asmlinkage void csky_tlbinvalids(void);3939+asmlinkage void csky_tlbmodified(void);4040+4141+/* Defined in head.S */4242+asmlinkage void _start_smp_secondary(void);4343+4444+void __init pre_trap_init(void)4545+{4646+ int i;4747+4848+ mtcr("vbr", vec_base);4949+5050+ for (i = 1; i < 128; i++)5151+ VEC_INIT(i, csky_trap);5252+}5353+5454+void __init trap_init(void)5555+{5656+ VEC_INIT(VEC_AUTOVEC, csky_irq);5757+5858+ /* setup trap0 trap2 trap3 */5959+ VEC_INIT(VEC_TRAP0, csky_systemcall);6060+ VEC_INIT(VEC_TRAP2, csky_cmpxchg);6161+ VEC_INIT(VEC_TRAP3, csky_get_tls);6262+6363+ /* setup MMU TLB exception */6464+ VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl);6565+ VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids);6666+ VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified);6767+6868+#ifdef CONFIG_CPU_HAS_FPU6969+ init_fpu();7070+#endif7171+7272+#ifdef CONFIG_SMP7373+ mtcr("cr<28, 0>", virt_to_phys(vec_base));7474+7575+ VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary));7676+#endif7777+}7878+7979+void 
die_if_kernel(char *str, struct pt_regs *regs, int nr)8080+{8181+ if (user_mode(regs))8282+ return;8383+8484+ console_verbose();8585+ pr_err("%s: %08x\n", str, nr);8686+ show_regs(regs);8787+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);8888+ do_exit(SIGSEGV);8989+}9090+9191+void buserr(struct pt_regs *regs)9292+{9393+#ifdef CONFIG_CPU_CK8109494+ static unsigned long prev_pc;9595+9696+ if ((regs->pc == prev_pc) && prev_pc != 0) {9797+ prev_pc = 0;9898+ } else {9999+ prev_pc = regs->pc;100100+ return;101101+ }102102+#endif103103+104104+ die_if_kernel("Kernel mode BUS error", regs, 0);105105+106106+ pr_err("User mode Bus Error\n");107107+ show_regs(regs);108108+109109+ current->thread.esp0 = (unsigned long) regs;110110+ force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current);111111+}112112+113113+#define USR_BKPT 0x1464114114+asmlinkage void trap_c(struct pt_regs *regs)115115+{116116+ int sig;117117+ unsigned long vector;118118+ siginfo_t info;119119+120120+ vector = (mfcr("psr") >> 16) & 0xff;121121+122122+ switch (vector) {123123+ case VEC_ZERODIV:124124+ sig = SIGFPE;125125+ break;126126+ /* ptrace */127127+ case VEC_TRACE:128128+ info.si_code = TRAP_TRACE;129129+ sig = SIGTRAP;130130+ break;131131+ case VEC_ILLEGAL:132132+#ifndef CONFIG_CPU_NO_USER_BKPT133133+ if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)134134+#endif135135+ {136136+ sig = SIGILL;137137+ break;138138+ }139139+ /* gdbserver breakpoint */140140+ case VEC_TRAP1:141141+ /* jtagserver breakpoint */142142+ case VEC_BREAKPOINT:143143+ info.si_code = TRAP_BRKPT;144144+ sig = SIGTRAP;145145+ break;146146+ case VEC_ACCESS:147147+ return buserr(regs);148148+#ifdef CONFIG_CPU_NEED_SOFTALIGN149149+ case VEC_ALIGN:150150+ return csky_alignment(regs);151151+#endif152152+#ifdef CONFIG_CPU_HAS_FPU153153+ case VEC_FPE:154154+ return fpu_fpe(regs);155155+ case VEC_PRIV:156156+ if (fpu_libc_helper(regs))157157+ return;158158+#endif159159+ default:160160+ sig = SIGSEGV;161161+ break;162162+ }163163+ 
send_sig(sig, current, 0);164164+}165165+166166+asmlinkage void set_esp0(unsigned long ssp)167167+{168168+ current->thread.esp0 = ssp;169169+}
/* ---- patch continues (+212 lines): arch/csky/mm/fault.c ---- */
···11+// SPDX-License-Identifier: GPL-2.022+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#include <linux/signal.h>55+#include <linux/module.h>66+#include <linux/sched.h>77+#include <linux/interrupt.h>88+#include <linux/kernel.h>99+#include <linux/errno.h>1010+#include <linux/string.h>1111+#include <linux/types.h>1212+#include <linux/ptrace.h>1313+#include <linux/mman.h>1414+#include <linux/mm.h>1515+#include <linux/smp.h>1616+#include <linux/version.h>1717+#include <linux/vt_kern.h>1818+#include <linux/kernel.h>1919+#include <linux/extable.h>2020+#include <linux/uaccess.h>2121+2222+#include <asm/hardirq.h>2323+#include <asm/mmu_context.h>2424+#include <asm/traps.h>2525+#include <asm/page.h>2626+2727+int fixup_exception(struct pt_regs *regs)2828+{2929+ const struct exception_table_entry *fixup;3030+3131+ fixup = search_exception_tables(instruction_pointer(regs));3232+ if (fixup) {3333+ regs->pc = fixup->nextinsn;3434+3535+ return 1;3636+ }3737+3838+ return 0;3939+}4040+4141+/*4242+ * This routine handles page faults. It determines the address,4343+ * and the problem, and then passes it off to one of the appropriate4444+ * routines.4545+ */4646+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,4747+ unsigned long mmu_meh)4848+{4949+ struct vm_area_struct *vma = NULL;5050+ struct task_struct *tsk = current;5151+ struct mm_struct *mm = tsk->mm;5252+ int si_code;5353+ int fault;5454+ unsigned long address = mmu_meh & PAGE_MASK;5555+5656+ si_code = SEGV_MAPERR;5757+5858+#ifndef CONFIG_CPU_HAS_TLBI5959+ /*6060+ * We fault-in kernel-space virtual memory on-demand. The6161+ * 'reference' page table is init_mm.pgd.6262+ *6363+ * NOTE! We MUST NOT take any locks for this case. 
We may6464+ * be in an interrupt or a critical region, and should6565+ * only copy the information from the master page table,6666+ * nothing more.6767+ */6868+ if (unlikely(address >= VMALLOC_START) &&6969+ unlikely(address <= VMALLOC_END)) {7070+ /*7171+ * Synchronize this task's top level page-table7272+ * with the 'reference' page table.7373+ *7474+ * Do _not_ use "tsk" here. We might be inside7575+ * an interrupt in the middle of a task switch..7676+ */7777+ int offset = __pgd_offset(address);7878+ pgd_t *pgd, *pgd_k;7979+ pud_t *pud, *pud_k;8080+ pmd_t *pmd, *pmd_k;8181+ pte_t *pte_k;8282+8383+ unsigned long pgd_base;8484+8585+ pgd_base = tlb_get_pgd();8686+ pgd = (pgd_t *)pgd_base + offset;8787+ pgd_k = init_mm.pgd + offset;8888+8989+ if (!pgd_present(*pgd_k))9090+ goto no_context;9191+ set_pgd(pgd, *pgd_k);9292+9393+ pud = (pud_t *)pgd;9494+ pud_k = (pud_t *)pgd_k;9595+ if (!pud_present(*pud_k))9696+ goto no_context;9797+9898+ pmd = pmd_offset(pud, address);9999+ pmd_k = pmd_offset(pud_k, address);100100+ if (!pmd_present(*pmd_k))101101+ goto no_context;102102+ set_pmd(pmd, *pmd_k);103103+104104+ pte_k = pte_offset_kernel(pmd_k, address);105105+ if (!pte_present(*pte_k))106106+ goto no_context;107107+ return;108108+ }109109+#endif110110+ /*111111+ * If we're in an interrupt or have no user112112+ * context, we must not take the fault..113113+ */114114+ if (in_atomic() || !mm)115115+ goto bad_area_nosemaphore;116116+117117+ down_read(&mm->mmap_sem);118118+ vma = find_vma(mm, address);119119+ if (!vma)120120+ goto bad_area;121121+ if (vma->vm_start <= address)122122+ goto good_area;123123+ if (!(vma->vm_flags & VM_GROWSDOWN))124124+ goto bad_area;125125+ if (expand_stack(vma, address))126126+ goto bad_area;127127+ /*128128+ * Ok, we have a good vm_area for this memory access, so129129+ * we can handle it..130130+ */131131+good_area:132132+ si_code = SEGV_ACCERR;133133+134134+ if (write) {135135+ if (!(vma->vm_flags & VM_WRITE))136136+ goto bad_area;137137+ } 
else {138138+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))139139+ goto bad_area;140140+ }141141+142142+ /*143143+ * If for any reason at all we couldn't handle the fault,144144+ * make sure we exit gracefully rather than endlessly redo145145+ * the fault.146146+ */147147+ fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);148148+ if (unlikely(fault & VM_FAULT_ERROR)) {149149+ if (fault & VM_FAULT_OOM)150150+ goto out_of_memory;151151+ else if (fault & VM_FAULT_SIGBUS)152152+ goto do_sigbus;153153+ else if (fault & VM_FAULT_SIGSEGV)154154+ goto bad_area;155155+ BUG();156156+ }157157+ if (fault & VM_FAULT_MAJOR)158158+ tsk->maj_flt++;159159+ else160160+ tsk->min_flt++;161161+162162+ up_read(&mm->mmap_sem);163163+ return;164164+165165+ /*166166+ * Something tried to access memory that isn't in our memory map..167167+ * Fix it, but check if it's kernel or user first..168168+ */169169+bad_area:170170+ up_read(&mm->mmap_sem);171171+172172+bad_area_nosemaphore:173173+ /* User mode accesses just cause a SIGSEGV */174174+ if (user_mode(regs)) {175175+ tsk->thread.address = address;176176+ tsk->thread.error_code = write;177177+ force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);178178+ return;179179+ }180180+181181+no_context:182182+ /* Are we prepared to handle this kernel fault? */183183+ if (fixup_exception(regs))184184+ return;185185+186186+ /*187187+ * Oops. The kernel tried to access some bad page. 
We'll have to188188+ * terminate things with extreme prejudice.189189+ */190190+ bust_spinlocks(1);191191+ pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n",192192+ __func__, address, regs->pc);193193+ die_if_kernel("Oops", regs, write);194194+195195+out_of_memory:196196+ /*197197+ * We ran out of memory, call the OOM killer, and return the userspace198198+ * (which will retry the fault, or kill us if we got oom-killed).199199+ */200200+ pagefault_out_of_memory();201201+ return;202202+203203+do_sigbus:204204+ up_read(&mm->mmap_sem);205205+206206+ /* Kernel mode? Handle exceptions or die */207207+ if (!user_mode(regs))208208+ goto no_context;209209+210210+ tsk->thread.address = address;211211+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);212212+}