···11+/* SPDX-License-Identifier: GPL-2.0 */22+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#ifndef __ASM_CSKY_MMU_CONTEXT_H55+#define __ASM_CSKY_MMU_CONTEXT_H66+77+#include <asm-generic/mm_hooks.h>88+#include <asm/setup.h>99+#include <asm/page.h>1010+#include <asm/cacheflush.h>1111+#include <asm/tlbflush.h>1212+1313+#include <linux/errno.h>1414+#include <linux/sched.h>1515+#include <abi/ckmmu.h>1616+1717+static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)1818+{1919+ pgd &= ~(1<<31);2020+ pgd += PHYS_OFFSET;2121+ pgd |= 1;2222+ setup_pgd(pgd, kernel);2323+}2424+2525+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \2626+ tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)2727+#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \2828+ tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)2929+3030+static inline unsigned long tlb_get_pgd(void)3131+{3232+ return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;3333+}3434+3535+#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])3636+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)3737+#define asid_cache(cpu) (cpu_data[cpu].asid_cache)3838+3939+#define ASID_FIRST_VERSION (1 << CONFIG_CPU_ASID_BITS)4040+#define ASID_INC 0x14141+#define ASID_MASK (ASID_FIRST_VERSION - 1)4242+#define ASID_VERSION_MASK ~ASID_MASK4343+4444+#define destroy_context(mm) do {} while (0)4545+#define enter_lazy_tlb(mm, tsk) do {} while (0)4646+#define deactivate_mm(tsk, mm) do {} while (0)4747+4848+/*4949+ * All unused by hardware upper bits will be considered5050+ * as a software asid extension.5151+ */5252+static inline void5353+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)5454+{5555+ unsigned long asid = asid_cache(cpu);5656+5757+ asid += ASID_INC;5858+ if (!(asid & ASID_MASK)) {5959+ flush_tlb_all(); /* start new asid cycle */6060+ if (!asid) /* fix version if needed */6161+ asid = ASID_FIRST_VERSION;6262+ }6363+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;6464+}6565+6666+/*6767+ * Initialize the 
context related info for a new mm_struct6868+ * instance.6969+ */7070+static inline int7171+init_new_context(struct task_struct *tsk, struct mm_struct *mm)7272+{7373+ int i;7474+7575+ for_each_online_cpu(i)7676+ cpu_context(i, mm) = 0;7777+ return 0;7878+}7979+8080+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,8181+ struct task_struct *tsk)8282+{8383+ unsigned int cpu = smp_processor_id();8484+ unsigned long flags;8585+8686+ local_irq_save(flags);8787+ /* Check if our ASID is of an older version and thus invalid */8888+ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)8989+ get_new_mmu_context(next, cpu);9090+ write_mmu_entryhi(cpu_asid(cpu, next));9191+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);9292+9393+ /*9494+ * Mark current->active_mm as not "active" anymore.9595+ * We don't want to mislead possible IPI tlb flush routines.9696+ */9797+ cpumask_clear_cpu(cpu, mm_cpumask(prev));9898+ cpumask_set_cpu(cpu, mm_cpumask(next));9999+100100+ local_irq_restore(flags);101101+}102102+103103+/*104104+ * After we have set current->mm to a new value, this activates105105+ * the context for the new mm so we see the new mappings.106106+ */107107+static inline void108108+activate_mm(struct mm_struct *prev, struct mm_struct *next)109109+{110110+ unsigned long flags;111111+ int cpu = smp_processor_id();112112+113113+ local_irq_save(flags);114114+115115+ /* Unconditionally get a new ASID. */116116+ get_new_mmu_context(next, cpu);117117+118118+ write_mmu_entryhi(cpu_asid(cpu, next));119119+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);120120+121121+ /* mark mmu ownership change */122122+ cpumask_clear_cpu(cpu, mm_cpumask(prev));123123+ cpumask_set_cpu(cpu, mm_cpumask(next));124124+125125+ local_irq_restore(flags);126126+}127127+128128+/*129129+ * If mm is currently active_mm, we can't really drop it. 
Instead,130130+ * we will get a new one for it.131131+ */132132+static inline void133133+drop_mmu_context(struct mm_struct *mm, unsigned int cpu)134134+{135135+ unsigned long flags;136136+137137+ local_irq_save(flags);138138+139139+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {140140+ get_new_mmu_context(mm, cpu);141141+ write_mmu_entryhi(cpu_asid(cpu, mm));142142+ } else {143143+ /* will get a new context next time */144144+ cpu_context(cpu, mm) = 0;145145+ }146146+147147+ local_irq_restore(flags);148148+}149149+150150+#endif /* __ASM_CSKY_MMU_CONTEXT_H */
/* ==== arch/csky/include/asm/processor.h (121 lines added in original patch) ==== */
···11+/* SPDX-License-Identifier: GPL-2.0 */22+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#ifndef __ASM_CSKY_PROCESSOR_H55+#define __ASM_CSKY_PROCESSOR_H66+77+/*88+ * Default implementation of macro that returns current99+ * instruction pointer ("program counter").1010+ */1111+#define current_text_addr() ({ __label__ _l; _l: &&_l; })1212+1313+#include <linux/bitops.h>1414+#include <asm/segment.h>1515+#include <asm/ptrace.h>1616+#include <asm/current.h>1717+#include <asm/cache.h>1818+#include <abi/reg_ops.h>1919+#include <abi/regdef.h>2020+#ifdef CONFIG_CPU_HAS_FPU2121+#include <abi/fpu.h>2222+#endif2323+2424+struct cpuinfo_csky {2525+ unsigned long udelay_val;2626+ unsigned long asid_cache;2727+ /*2828+ * Capability and feature descriptor structure for CSKY CPU2929+ */3030+ unsigned long options;3131+ unsigned int processor_id[4];3232+ unsigned int fpu_id;3333+} __aligned(SMP_CACHE_BYTES);3434+3535+extern struct cpuinfo_csky cpu_data[];3636+3737+/*3838+ * User space process size: 2GB. This is hardcoded into a few places,3939+ * so don't change it unless you know what you are doing. TASK_SIZE4040+ * for a 64 bit kernel expandable to 8192EB, of which the current CSKY4141+ * implementations will "only" be able to use 1TB ...4242+ */4343+#define TASK_SIZE 0x7fff8000UL4444+4545+#ifdef __KERNEL__4646+#define STACK_TOP TASK_SIZE4747+#define STACK_TOP_MAX STACK_TOP4848+#endif4949+5050+/* This decides where the kernel will search for a free chunk of vm5151+ * space during mmap's.5252+ */5353+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)5454+5555+struct thread_struct {5656+ unsigned long ksp; /* kernel stack pointer */5757+ unsigned long sr; /* saved status register */5858+ unsigned long esp0; /* points to SR of stack frame */5959+ unsigned long hi;6060+ unsigned long lo;6161+6262+ /* Other stuff associated with the thread. 
*/6363+ unsigned long address; /* Last user fault */6464+ unsigned long error_code;6565+6666+ /* FPU regs */6767+ struct user_fp __aligned(16) user_fp;6868+};6969+7070+#define INIT_THREAD { \7171+ .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \7272+ .sr = DEFAULT_PSR_VALUE, \7373+}7474+7575+/*7676+ * Do necessary setup to start up a newly executed thread.7777+ *7878+ * pass the data segment into user programs if it exists,7979+ * it can't hurt anything as far as I can tell8080+ */8181+#define start_thread(_regs, _pc, _usp) \8282+do { \8383+ set_fs(USER_DS); /* reads from user space */ \8484+ (_regs)->pc = (_pc); \8585+ (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \8686+ (_regs)->regs[2] = 0; \8787+ (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */ \8888+ (_regs)->sr &= ~PS_S; \8989+ (_regs)->usp = (_usp); \9090+} while (0)9191+9292+/* Forward declaration, a strange C thing */9393+struct task_struct;9494+9595+/* Free all resources held by a thread. */9696+static inline void release_thread(struct task_struct *dead_task)9797+{9898+}9999+100100+/* Prepare to copy thread state - unlazy all lazy status */101101+#define prepare_to_copy(tsk) do { } while (0)102102+103103+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);104104+105105+#define copy_segments(tsk, mm) do { } while (0)106106+#define release_segments(mm) do { } while (0)107107+#define forget_segments() do { } while (0)108108+109109+extern unsigned long thread_saved_pc(struct task_struct *tsk);110110+111111+unsigned long get_wchan(struct task_struct *p);112112+113113+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)114114+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)115115+116116+#define task_pt_regs(p) \117117+ ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)118118+119119+#define cpu_relax() barrier()120120+121121+#endif /* __ASM_CSKY_PROCESSOR_H */
···11+// SPDX-License-Identifier: GPL-2.022+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#include <linux/module.h>55+#include <linux/version.h>66+#include <linux/sched.h>77+#include <linux/sched/task_stack.h>88+#include <linux/sched/debug.h>99+#include <linux/delay.h>1010+#include <linux/kallsyms.h>1111+#include <linux/uaccess.h>1212+#include <linux/ptrace.h>1313+1414+#include <asm/elf.h>1515+#include <abi/reg_ops.h>1616+1717+struct cpuinfo_csky cpu_data[NR_CPUS];1818+1919+asmlinkage void ret_from_fork(void);2020+asmlinkage void ret_from_kernel_thread(void);2121+2222+/*2323+ * Some archs flush debug and FPU info here2424+ */2525+void flush_thread(void){}2626+2727+/*2828+ * Return saved PC from a blocked thread2929+ */3030+unsigned long thread_saved_pc(struct task_struct *tsk)3131+{3232+ struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;3333+3434+ return sw->r15;3535+}3636+3737+int copy_thread(unsigned long clone_flags,3838+ unsigned long usp,3939+ unsigned long kthread_arg,4040+ struct task_struct *p)4141+{4242+ struct switch_stack *childstack;4343+ struct pt_regs *childregs = task_pt_regs(p);4444+4545+#ifdef CONFIG_CPU_HAS_FPU4646+ save_to_user_fp(&p->thread.user_fp);4747+#endif4848+4949+ childstack = ((struct switch_stack *) childregs) - 1;5050+ memset(childstack, 0, sizeof(struct switch_stack));5151+5252+ /* setup ksp for switch_to !!! 
*/5353+ p->thread.ksp = (unsigned long)childstack;5454+5555+ if (unlikely(p->flags & PF_KTHREAD)) {5656+ memset(childregs, 0, sizeof(struct pt_regs));5757+ childstack->r15 = (unsigned long) ret_from_kernel_thread;5858+ childstack->r8 = kthread_arg;5959+ childstack->r9 = usp;6060+ childregs->sr = mfcr("psr");6161+ } else {6262+ *childregs = *(current_pt_regs());6363+ if (usp)6464+ childregs->usp = usp;6565+ if (clone_flags & CLONE_SETTLS)6666+ task_thread_info(p)->tp_value = childregs->tls6767+ = childregs->regs[0];6868+6969+ childregs->a0 = 0;7070+ childstack->r15 = (unsigned long) ret_from_fork;7171+ }7272+7373+ return 0;7474+}7575+7676+/* Fill in the fpu structure for a core dump. */7777+int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)7878+{7979+ memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu));8080+ return 1;8181+}8282+EXPORT_SYMBOL(dump_fpu);8383+8484+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)8585+{8686+ struct pt_regs *regs = task_pt_regs(tsk);8787+8888+ /* NOTE: usp is error value. */8989+ ELF_CORE_COPY_REGS((*pr_regs), regs)9090+9191+ return 1;9292+}9393+9494+unsigned long get_wchan(struct task_struct *p)9595+{9696+ unsigned long esp, pc;9797+ unsigned long stack_page;9898+ int count = 0;9999+100100+ if (!p || p == current || p->state == TASK_RUNNING)101101+ return 0;102102+103103+ stack_page = (unsigned long)p;104104+ esp = p->thread.esp0;105105+ do {106106+ if (esp < stack_page+sizeof(struct task_struct) ||107107+ esp >= 8184+stack_page)108108+ return 0;109109+ /*FIXME: There's may be error here!*/110110+ pc = ((unsigned long *)esp)[1];111111+ /* FIXME: This depends on the order of these functions. 
*/112112+ if (!in_sched_functions(pc))113113+ return pc;114114+ esp = *(unsigned long *) esp;115115+ } while (count++ < 16);116116+ return 0;117117+}118118+EXPORT_SYMBOL(get_wchan);119119+120120+#ifndef CONFIG_CPU_PM_NONE121121+void arch_cpu_idle(void)122122+{123123+#ifdef CONFIG_CPU_PM_WAIT124124+ asm volatile("wait\n");125125+#endif126126+127127+#ifdef CONFIG_CPU_PM_DOZE128128+ asm volatile("doze\n");129129+#endif130130+131131+#ifdef CONFIG_CPU_PM_STOP132132+ asm volatile("stop\n");133133+#endif134134+ local_irq_enable();135135+}136136+#endif
/* ==== arch/csky/kernel/signal.c (347 lines added in original patch) ==== */
···11+// SPDX-License-Identifier: GPL-2.022+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.33+44+#include <linux/sched.h>55+#include <linux/mm.h>66+#include <linux/kernel.h>77+#include <linux/signal.h>88+#include <linux/syscalls.h>99+#include <linux/errno.h>1010+#include <linux/wait.h>1111+#include <linux/ptrace.h>1212+#include <linux/unistd.h>1313+#include <linux/stddef.h>1414+#include <linux/highuid.h>1515+#include <linux/personality.h>1616+#include <linux/tty.h>1717+#include <linux/binfmts.h>1818+#include <linux/tracehook.h>1919+#include <linux/freezer.h>2020+#include <linux/uaccess.h>2121+2222+#include <asm/setup.h>2323+#include <asm/pgtable.h>2424+#include <asm/traps.h>2525+#include <asm/ucontext.h>2626+#include <asm/vdso.h>2727+2828+#include <abi/regdef.h>2929+3030+#ifdef CONFIG_CPU_HAS_FPU3131+#include <abi/fpu.h>3232+3333+static int restore_fpu_state(struct sigcontext *sc)3434+{3535+ int err = 0;3636+ struct user_fp user_fp;3737+3838+ err = copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp));3939+4040+ restore_from_user_fp(&user_fp);4141+4242+ return err;4343+}4444+4545+static int save_fpu_state(struct sigcontext *sc)4646+{4747+ struct user_fp user_fp;4848+4949+ save_to_user_fp(&user_fp);5050+5151+ return copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp));5252+}5353+#else5454+static inline int restore_fpu_state(struct sigcontext *sc) { return 0; }5555+static inline int save_fpu_state(struct sigcontext *sc) { return 0; }5656+#endif5757+5858+struct rt_sigframe {5959+ int sig;6060+ struct siginfo *pinfo;6161+ void *puc;6262+ struct siginfo info;6363+ struct ucontext uc;6464+};6565+6666+static int6767+restore_sigframe(struct pt_regs *regs,6868+ struct sigcontext *sc, int *pr2)6969+{7070+ int err = 0;7171+7272+ /* Always make any pending restarted system calls return -EINTR */7373+ current_thread_info()->task->restart_block.fn = do_no_restart_syscall;7474+7575+ err |= copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct 
pt_regs));7676+7777+ err |= restore_fpu_state(sc);7878+7979+ *pr2 = regs->a0;8080+ return err;8181+}8282+8383+asmlinkage int8484+do_rt_sigreturn(void)8585+{8686+ sigset_t set;8787+ int a0;8888+ struct pt_regs *regs = current_pt_regs();8989+ struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp);9090+9191+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))9292+ goto badframe;9393+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))9494+ goto badframe;9595+9696+ sigdelsetmask(&set, (sigmask(SIGKILL) | sigmask(SIGSTOP)));9797+ spin_lock_irq(¤t->sighand->siglock);9898+ current->blocked = set;9999+ recalc_sigpending();100100+ spin_unlock_irq(¤t->sighand->siglock);101101+102102+ if (restore_sigframe(regs, &frame->uc.uc_mcontext, &a0))103103+ goto badframe;104104+105105+ return a0;106106+107107+badframe:108108+ force_sig(SIGSEGV, current);109109+ return 0;110110+}111111+112112+static int setup_sigframe(struct sigcontext *sc, struct pt_regs *regs)113113+{114114+ int err = 0;115115+116116+ err |= copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs));117117+ err |= save_fpu_state(sc);118118+119119+ return err;120120+}121121+122122+static inline void *123123+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)124124+{125125+ unsigned long usp;126126+127127+ /* Default to using normal stack. */128128+ usp = regs->usp;129129+130130+ /* This is the X/Open sanctioned signal stack switching. 
*/131131+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(usp)) {132132+ if (!on_sig_stack(usp))133133+ usp = current->sas_ss_sp + current->sas_ss_size;134134+ }135135+ return (void *)((usp - frame_size) & -8UL);136136+}137137+138138+static int139139+setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)140140+{141141+ struct rt_sigframe *frame;142142+ int err = 0;143143+144144+ struct csky_vdso *vdso = current->mm->context.vdso;145145+146146+ frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));147147+ if (!frame)148148+ return 1;149149+150150+ err |= __put_user(ksig->sig, &frame->sig);151151+ err |= __put_user(&frame->info, &frame->pinfo);152152+ err |= __put_user(&frame->uc, &frame->puc);153153+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);154154+155155+ /* Create the ucontext. */156156+ err |= __put_user(0, &frame->uc.uc_flags);157157+ err |= __put_user(0, &frame->uc.uc_link);158158+ err |= __put_user((void *)current->sas_ss_sp,159159+ &frame->uc.uc_stack.ss_sp);160160+ err |= __put_user(sas_ss_flags(regs->usp),161161+ &frame->uc.uc_stack.ss_flags);162162+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);163163+ err |= setup_sigframe(&frame->uc.uc_mcontext, regs);164164+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));165165+166166+ if (err)167167+ goto give_sigsegv;168168+169169+ /* Set up registers for signal handler */170170+ regs->usp = (unsigned long)frame;171171+ regs->pc = (unsigned long)ksig->ka.sa.sa_handler;172172+ regs->lr = (unsigned long)vdso->rt_signal_retcode;173173+174174+adjust_stack:175175+ regs->a0 = ksig->sig; /* first arg is signo */176176+ regs->a1 = (unsigned long)(&(frame->info));177177+ regs->a2 = (unsigned long)(&(frame->uc));178178+ return err;179179+180180+give_sigsegv:181181+ if (ksig->sig == SIGSEGV)182182+ ksig->ka.sa.sa_handler = SIG_DFL;183183+ force_sig(SIGSEGV, current);184184+ goto adjust_stack;185185+}186186+187187+/*188188+ * OK, we're invoking a 
handler189189+ */190190+static int191191+handle_signal(struct ksignal *ksig, struct pt_regs *regs)192192+{193193+ int ret;194194+ sigset_t *oldset = sigmask_to_save();195195+196196+ /*197197+ * set up the stack frame, regardless of SA_SIGINFO,198198+ * and pass info anyway.199199+ */200200+ ret = setup_rt_frame(ksig, oldset, regs);201201+202202+ if (ret != 0) {203203+ force_sigsegv(ksig->sig, current);204204+ return ret;205205+ }206206+207207+ /* Block the signal if we were successful. */208208+ spin_lock_irq(¤t->sighand->siglock);209209+ sigorsets(¤t->blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);210210+ if (!(ksig->ka.sa.sa_flags & SA_NODEFER))211211+ sigaddset(¤t->blocked, ksig->sig);212212+ recalc_sigpending();213213+ spin_unlock_irq(¤t->sighand->siglock);214214+215215+ return 0;216216+}217217+218218+/*219219+ * Note that 'init' is a special process: it doesn't get signals it doesn't220220+ * want to handle. Thus you cannot kill init even with a SIGKILL even by221221+ * mistake.222222+ *223223+ * Note that we go through the signals twice: once to check the signals224224+ * that the kernel can handle, and then we build all the user-level signal225225+ * handling stack-frames in one go after that.226226+ */227227+static void do_signal(struct pt_regs *regs, int syscall)228228+{229229+ unsigned int retval = 0, continue_addr = 0, restart_addr = 0;230230+ struct ksignal ksig;231231+232232+ /*233233+ * We want the common case to go fast, which234234+ * is why we may in certain cases get here from235235+ * kernel mode. 
Just return without doing anything236236+ * if so.237237+ */238238+ if (!user_mode(regs))239239+ return;240240+241241+ current->thread.esp0 = (unsigned long)regs;242242+243243+ /*244244+ * If we were from a system call, check for system call restarting...245245+ */246246+ if (syscall) {247247+ continue_addr = regs->pc;248248+#if defined(__CSKYABIV2__)249249+ restart_addr = continue_addr - 4;250250+#else251251+ restart_addr = continue_addr - 2;252252+#endif253253+ retval = regs->a0;254254+255255+ /*256256+ * Prepare for system call restart. We do this here so that a257257+ * debugger will see the already changed.258258+ */259259+ switch (retval) {260260+ case -ERESTARTNOHAND:261261+ case -ERESTARTSYS:262262+ case -ERESTARTNOINTR:263263+ regs->a0 = regs->orig_a0;264264+ regs->pc = restart_addr;265265+ break;266266+ case -ERESTART_RESTARTBLOCK:267267+ regs->a0 = -EINTR;268268+ break;269269+ }270270+ }271271+272272+ if (try_to_freeze())273273+ goto no_signal;274274+275275+ /*276276+ * Get the signal to deliver. When running under ptrace, at this277277+ * point the debugger may change all our registers ...278278+ */279279+ if (get_signal(&ksig)) {280280+ /*281281+ * Depending on the signal settings we may need to revert the282282+ * decision to restart the system call. But skip this if a283283+ * debugger has chosen to restart at a different PC.284284+ */285285+ if (regs->pc == restart_addr) {286286+ if (retval == -ERESTARTNOHAND ||287287+ (retval == -ERESTARTSYS &&288288+ !(ksig.ka.sa.sa_flags & SA_RESTART))) {289289+ regs->a0 = -EINTR;290290+ regs->pc = continue_addr;291291+ }292292+ }293293+294294+ /* Whee! Actually deliver the signal. 
*/295295+ if (handle_signal(&ksig, regs) == 0) {296296+ /*297297+ * A signal was successfully delivered; the saved298298+ * sigmask will have been stored in the signal frame,299299+ * and will be restored by sigreturn, so we can simply300300+ * clear the TIF_RESTORE_SIGMASK flag.301301+ */302302+ if (test_thread_flag(TIF_RESTORE_SIGMASK))303303+ clear_thread_flag(TIF_RESTORE_SIGMASK);304304+ }305305+ return;306306+ }307307+308308+no_signal:309309+ if (syscall) {310310+ /*311311+ * Handle restarting a different system call. As above,312312+ * if a debugger has chosen to restart at a different PC,313313+ * ignore the restart.314314+ */315315+ if (retval == -ERESTART_RESTARTBLOCK316316+ && regs->pc == continue_addr) {317317+#if defined(__CSKYABIV2__)318318+ regs->regs[3] = __NR_restart_syscall;319319+ regs->pc -= 4;320320+#else321321+ regs->regs[9] = __NR_restart_syscall;322322+ regs->pc -= 2;323323+#endif324324+ }325325+326326+ /*327327+ * If there's no signal to deliver, we just put the saved328328+ * sigmask back.329329+ */330330+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {331331+ clear_thread_flag(TIF_RESTORE_SIGMASK);332332+ sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);333333+ }334334+ }335335+}336336+337337+asmlinkage void338338+do_notify_resume(unsigned int thread_flags, struct pt_regs *regs, int syscall)339339+{340340+ if (thread_flags & _TIF_SIGPENDING)341341+ do_signal(regs, syscall);342342+343343+ if (thread_flags & _TIF_NOTIFY_RESUME) {344344+ clear_thread_flag(TIF_NOTIFY_RESUME);345345+ tracehook_notify_resume(regs);346346+ }347347+}