Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Process management and Signal

This patch adds the files related to task switching, sigcontext, signal
handling and FPU context switching.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Eric W. Biederman <ebiederm@xmission.com>

Guo Ren e9564df7 013de2d6

+1231
+275
arch/csky/abiv2/fpu.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* FPU init, user-trap helpers and FPU register context save/restore. */

#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <abi/reg_ops.h>

/*
 * Opcode masks/values used to recognise the mtcr/mfcr forms that user
 * space may trap on (access to fcr/fesr, cr<1,2>/cr<2,2>).
 * NOTE(review): encoding constants assumed to match the CK810/CK860
 * 32-bit instruction format — confirm against the ISA manual.
 */
#define MTCR_MASK	0xFC00FFE0
#define MFCR_MASK	0xFC00FFE0
#define MTCR_DIST	0xC0006420
#define MFCR_DIST	0xC0006020

/* Clear fcr (cr<1,2>) so all FPU exception enables start disabled. */
void __init init_fpu(void)
{
	mtcr("cr<1, 2>", 0);
}

/*
 * fpu_libc_helper() is to help libc to execute:
 *  - mfcr %a, cr<1, 2>
 *  - mfcr %a, cr<2, 2>
 *  - mtcr %a, cr<1, 2>
 *  - mtcr %a, cr<2, 2>
 *
 * Decodes the faulting instruction and emulates it on behalf of user
 * space.  Returns 1 if the instruction was emulated (pc advanced past
 * it), 0 if it is not one of the forms above and normal fault handling
 * should continue.
 */
int fpu_libc_helper(struct pt_regs *regs)
{
	int fault;
	unsigned long instrptr, regx = 0;
	unsigned long index = 0, tmp = 0;
	unsigned long tinstr = 0;
	u16 instr_hi, instr_low;

	instrptr = instruction_pointer(regs);
	/* 32-bit insns must be 2-byte aligned; bail out on odd pc. */
	if (instrptr & 1)
		return 0;

	/* Fetch the two 16-bit halves of the instruction from user text. */
	fault = __get_user(instr_low, (u16 *)instrptr);
	if (fault)
		return 0;

	fault = __get_user(instr_hi, (u16 *)(instrptr + 2));
	if (fault)
		return 0;

	tinstr = instr_hi | ((unsigned long)instr_low << 16);

	/* Only control registers in selector space 2 are handled here. */
	if (((tinstr >> 21) & 0x1F) != 2)
		return 0;

	if ((tinstr & MTCR_MASK) == MTCR_DIST) {
		/* mtcr: source GPR index lives in bits 20:16. */
		index = (tinstr >> 16) & 0x1F;
		if (index > 13)
			return 0;

		tmp = tinstr & 0x1F;
		if (tmp > 2)
			return 0;

		/* GPRs a0..a13 are contiguous in pt_regs, index off a0. */
		regx = *(&regs->a0 + index);

		if (tmp == 1)
			mtcr("cr<1, 2>", regx);
		else if (tmp == 2)
			mtcr("cr<2, 2>", regx);
		else
			return 0;

		/* Skip the emulated 4-byte instruction. */
		regs->pc += 4;
		return 1;
	}

	if ((tinstr & MFCR_MASK) == MFCR_DIST) {
		/* mfcr: destination GPR index lives in bits 4:0. */
		index = tinstr & 0x1F;
		if (index > 13)
			return 0;

		tmp = ((tinstr >> 16) & 0x1F);
		if (tmp > 2)
			return 0;

		if (tmp == 1)
			regx = mfcr("cr<1, 2>");
		else if (tmp == 2)
			regx = mfcr("cr<2, 2>");
		else
			return 0;

		*(&regs->a0 + index) = regx;

		regs->pc += 4;
		return 1;
	}

	return 0;
}

/*
 * FPU exception entry: read fesr (cr<2,2>) and translate the pending
 * exception bits into the matching POSIX signal/si_code, highest
 * priority first (illegal insn, then denormal input, then arithmetic).
 */
void fpu_fpe(struct pt_regs *regs)
{
	int sig, code;
	unsigned int fesr;

	fesr = mfcr("cr<2, 2>");

	sig = SIGFPE;
	code = FPE_FLTUNK;

	if (fesr & FPE_ILLE) {
		sig = SIGILL;
		code = ILL_ILLOPC;
	} else if (fesr & FPE_IDC) {
		sig = SIGILL;
		code = ILL_ILLOPN;
	} else if (fesr & FPE_FEC) {
		sig = SIGFPE;
		if (fesr & FPE_IOC)
			code = FPE_FLTINV;
		else if (fesr & FPE_DZC)
			code = FPE_FLTDIV;
		else if (fesr & FPE_UFC)
			code = FPE_FLTUND;
		else if (fesr & FPE_OFC)
			code = FPE_FLTOVF;
		else if (fesr & FPE_IXC)
			code = FPE_FLTRES;
	}

	force_sig_fault(sig, code, (void __user *)regs->pc, current);
}

/*
 * Helper asm snippets for the non-FPUV2 path: move a pair of 64-bit
 * vector registers to/from four GPR temporaries, and store/load those
 * temporaries at fixed offsets from the user_fp buffer pointer (%4).
 */
#define FMFVR_FPU_REGS(vrx, vry)	\
	"fmfvrl %0, "#vrx"\n"		\
	"fmfvrh %1, "#vrx"\n"		\
	"fmfvrl %2, "#vry"\n"		\
	"fmfvrh %3, "#vry"\n"

#define FMTVR_FPU_REGS(vrx, vry)	\
	"fmtvrl "#vrx", %0\n"		\
	"fmtvrh "#vrx", %1\n"		\
	"fmtvrl "#vry", %2\n"		\
	"fmtvrh "#vry", %3\n"

#define STW_FPU_REGS(a, b, c, d)	\
	"stw %0, (%4, "#a")\n"		\
	"stw %1, (%4, "#b")\n"		\
	"stw %2, (%4, "#c")\n"		\
	"stw %3, (%4, "#d")\n"

#define LDW_FPU_REGS(a, b, c, d)	\
	"ldw %0, (%4, "#a")\n"		\
	"ldw %1, (%4, "#b")\n"		\
	"ldw %2, (%4, "#c")\n"		\
	"ldw %3, (%4, "#d")\n"

/*
 * Save the complete FPU context (fcr, fesr and all vector registers)
 * of the current CPU into *user_fp.  Interrupts are disabled so the
 * register file cannot change mid-save.
 */
void save_to_user_fp(struct user_fp *user_fp)
{
	unsigned long flg;
	unsigned long tmp1, tmp2;
	unsigned long *fpregs;

	local_irq_save(flg);

	tmp1 = mfcr("cr<1, 2>");
	tmp2 = mfcr("cr<2, 2>");

	user_fp->fcr = tmp1;
	user_fp->fesr = tmp2;

	fpregs = &user_fp->vr[0];
#ifdef CONFIG_CPU_HAS_FPUV2
#ifdef CONFIG_CPU_HAS_VDSP
	/* With VDSP: vr0-vr15 via vector stores, vr16-vr31 via FPU stores. */
	asm volatile(
		"vstmu.32 vr0-vr3, (%0)\n"
		"vstmu.32 vr4-vr7, (%0)\n"
		"vstmu.32 vr8-vr11, (%0)\n"
		"vstmu.32 vr12-vr15, (%0)\n"
		"fstmu.64 vr16-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#else
	asm volatile(
		"fstmu.64 vr0-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#endif
#else
	{
		unsigned long tmp3, tmp4;

		/*
		 * FPUV1: move vr0-vr15 through GPRs two at a time; offsets
		 * are limited so the base is bumped by 128 halfway through.
		 */
		asm volatile(
			FMFVR_FPU_REGS(vr0, vr1)
			STW_FPU_REGS(0, 4, 16, 20)
			FMFVR_FPU_REGS(vr2, vr3)
			STW_FPU_REGS(32, 36, 48, 52)
			FMFVR_FPU_REGS(vr4, vr5)
			STW_FPU_REGS(64, 68, 80, 84)
			FMFVR_FPU_REGS(vr6, vr7)
			STW_FPU_REGS(96, 100, 112, 116)
			"addi %4, 128\n"
			FMFVR_FPU_REGS(vr8, vr9)
			STW_FPU_REGS(0, 4, 16, 20)
			FMFVR_FPU_REGS(vr10, vr11)
			STW_FPU_REGS(32, 36, 48, 52)
			FMFVR_FPU_REGS(vr12, vr13)
			STW_FPU_REGS(64, 68, 80, 84)
			FMFVR_FPU_REGS(vr14, vr15)
			STW_FPU_REGS(96, 100, 112, 116)
			: "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
			  "=a"(tmp4), "+a"(fpregs)
			::"memory");
	}
#endif

	local_irq_restore(flg);
}

/*
 * Mirror of save_to_user_fp(): load fcr, fesr and the vector register
 * file from *user_fp back into the hardware, with interrupts disabled.
 */
void restore_from_user_fp(struct user_fp *user_fp)
{
	unsigned long flg;
	unsigned long tmp1, tmp2;
	unsigned long *fpregs;

	local_irq_save(flg);

	tmp1 = user_fp->fcr;
	tmp2 = user_fp->fesr;

	mtcr("cr<1, 2>", tmp1);
	mtcr("cr<2, 2>", tmp2);

	fpregs = &user_fp->vr[0];
#ifdef CONFIG_CPU_HAS_FPUV2
#ifdef CONFIG_CPU_HAS_VDSP
	asm volatile(
		"vldmu.32 vr0-vr3, (%0)\n"
		"vldmu.32 vr4-vr7, (%0)\n"
		"vldmu.32 vr8-vr11, (%0)\n"
		"vldmu.32 vr12-vr15, (%0)\n"
		"fldmu.64 vr16-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#else
	asm volatile(
		"fldmu.64 vr0-vr31, (%0)\n"
		: "+a"(fpregs)
		::"memory");
#endif
#else
	{
		unsigned long tmp3, tmp4;

		asm volatile(
			LDW_FPU_REGS(0, 4, 16, 20)
			FMTVR_FPU_REGS(vr0, vr1)
			LDW_FPU_REGS(32, 36, 48, 52)
			FMTVR_FPU_REGS(vr2, vr3)
			LDW_FPU_REGS(64, 68, 80, 84)
			FMTVR_FPU_REGS(vr4, vr5)
			LDW_FPU_REGS(96, 100, 112, 116)
			FMTVR_FPU_REGS(vr6, vr7)
			"addi %4, 128\n"
			LDW_FPU_REGS(0, 4, 16, 20)
			FMTVR_FPU_REGS(vr8, vr9)
			LDW_FPU_REGS(32, 36, 48, 52)
			FMTVR_FPU_REGS(vr10, vr11)
			LDW_FPU_REGS(64, 68, 80, 84)
			FMTVR_FPU_REGS(vr12, vr13)
			LDW_FPU_REGS(96, 100, 112, 116)
			FMTVR_FPU_REGS(vr14, vr15)
			: "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
			  "=a"(tmp4), "+a"(fpregs)
			::"memory");
	}
#endif
	local_irq_restore(flg);
}
+66
arch/csky/abiv2/inc/abi/fpu.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* ABI v2 FPU interface: trap helpers, context switch and fesr bits. */

#ifndef __ASM_CSKY_FPU_H
#define __ASM_CSKY_FPU_H

#include <asm/sigcontext.h>
#include <asm/ptrace.h>

/* Emulate user mtcr/mfcr on fcr/fesr; 1 = handled, 0 = not ours. */
int fpu_libc_helper(struct pt_regs *regs);
/* Translate a pending FPU exception into a SIGFPE/SIGILL for current. */
void fpu_fpe(struct pt_regs *regs);
void __init init_fpu(void);

/* Eager FPU context save/restore used at task switch and for signals. */
void save_to_user_fp(struct user_fp *user_fp);
void restore_from_user_fp(struct user_fp *user_fp);

/*
 * Define the fesr bit for fpe handle.
 */
#define FPE_ILLE  (1 << 16)	/* Illegal instruction */
#define FPE_FEC   (1 << 7)	/* Input float-point arithmetic exception */
#define FPE_IDC   (1 << 5)	/* Input denormalized exception */
#define FPE_IXC   (1 << 4)	/* Inexact exception */
#define FPE_UFC   (1 << 3)	/* Underflow exception */
#define FPE_OFC   (1 << 2)	/* Overflow exception */
#define FPE_DZC   (1 << 1)	/* Divide by zero exception */
#define FPE_IOC   (1 << 0)	/* Invalid operation exception */
#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC)

/*
 * Per-exception enable bits for fcr, selected at build time by the
 * CONFIG_OPEN_FPU_* options; 0 means the exception stays masked.
 */
#ifdef CONFIG_OPEN_FPU_IDE
#define IDE_STAT (1 << 5)
#else
#define IDE_STAT 0
#endif

#ifdef CONFIG_OPEN_FPU_IXE
#define IXE_STAT (1 << 4)
#else
#define IXE_STAT 0
#endif

#ifdef CONFIG_OPEN_FPU_UFE
#define UFE_STAT (1 << 3)
#else
#define UFE_STAT 0
#endif

#ifdef CONFIG_OPEN_FPU_OFE
#define OFE_STAT (1 << 2)
#else
#define OFE_STAT 0
#endif

#ifdef CONFIG_OPEN_FPU_DZE
#define DZE_STAT (1 << 1)
#else
#define DZE_STAT 0
#endif

#ifdef CONFIG_OPEN_FPU_IOE
#define IOE_STAT (1 << 0)
#else
#define IOE_STAT 0
#endif

#endif /* __ASM_CSKY_FPU_H */
+150
arch/csky/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* MMU context (ASID) management for the C-SKY software-managed TLB. */

#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <abi/ckmmu.h>

/*
 * Install a pgd for the TLB-miss handler.
 * NOTE(review): clearing bit 31 and adding PHYS_OFFSET presumably
 * converts the kernel virtual pgd address to a physical one, and bit 0
 * appears to be a valid/enable flag consumed by setup_pgd() — confirm
 * against the C-SKY MMU documentation.
 */
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
	pgd &= ~(1<<31);
	pgd += PHYS_OFFSET;
	pgd |= 1;
	setup_pgd(pgd, kernel);
}

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)

/* Inverse of tlbmiss_handler_setup_pgd(): recover the virtual pgd. */
static inline unsigned long tlb_get_pgd(void)
{
	return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
}

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#define ASID_FIRST_VERSION	(1 << CONFIG_CPU_ASID_BITS)
#define ASID_INC		0x1
#define ASID_MASK		(ASID_FIRST_VERSION - 1)
#define ASID_VERSION_MASK	~ASID_MASK

#define destroy_context(mm)		do {} while (0)
#define enter_lazy_tlb(mm, tsk)		do {} while (0)
#define deactivate_mm(tsk, mm)		do {} while (0)

/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	/* Hardware ASID space exhausted: flush and start a new cycle. */
	if (!(asid & ASID_MASK)) {
		flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = 0;
	return 0;
}

/*
 * Switch address spaces: revalidate (or allocate) the next mm's ASID,
 * program entryhi and the TLB-miss pgd, and update mm_cpumask so IPI
 * TLB-flush code knows who is using which mm.  Runs irq-disabled.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID. */
	get_new_mmu_context(next, cpu);

	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		get_new_mmu_context(mm, cpu);
		write_mmu_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}

#endif /* __ASM_CSKY_MMU_CONTEXT_H */
+121
arch/csky/include/asm/processor.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

#include <linux/bitops.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>
#include <abi/regdef.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

/* Per-CPU data, cacheline-aligned to avoid false sharing on SMP. */
struct cpuinfo_csky {
	unsigned long udelay_val;
	unsigned long asid_cache;
	/*
	 * Capability and feature descriptor structure for CSKY CPU
	 */
	unsigned long options;
	unsigned int processor_id[4];
	unsigned int fpu_id;
} __aligned(SMP_CACHE_BYTES);

extern struct cpuinfo_csky cpu_data[];

/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * for a 64 bit kernel expandable to 8192EB, of which the current CSKY
 * implementations will "only" be able to use 1TB ...
 */
#define TASK_SIZE	0x7fff8000UL

#ifdef __KERNEL__
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP
#endif

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)

/* Per-thread architectural state saved across context switches. */
struct thread_struct {
	unsigned long  ksp;		/* kernel stack pointer */
	unsigned long  sr;		/* saved status register */
	unsigned long  esp0;		/* points to SR of stack frame */
	unsigned long  hi;
	unsigned long  lo;

	/* Other stuff associated with the thread. */
	unsigned long address;		/* Last user fault */
	unsigned long error_code;

	/* FPU regs */
	struct user_fp __aligned(16) user_fp;
};

#define INIT_THREAD  { \
	.ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
	.sr = DEFAULT_PSR_VALUE, \
}

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * pass the data segment into user programs if it exists,
 * it can't hurt anything as far as I can tell
 */
#define start_thread(_regs, _pc, _usp)					\
do {									\
	set_fs(USER_DS); /* reads from user space */			\
	(_regs)->pc = (_pc);						\
	(_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */	\
	(_regs)->regs[2] = 0;						\
	(_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */		\
	(_regs)->sr &= ~PS_S;	/* drop supervisor bit: run in user mode */ \
	(_regs)->usp = (_usp);						\
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)    do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)
#define forget_segments()		do { } while (0)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)

/* pt_regs live at the very top of the kernel stack page(s). */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)

#define cpu_relax() barrier()

#endif /* __ASM_CSKY_PROCESSOR_H */
+36
arch/csky/include/asm/switch_to.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SWITCH_TO_H
#define __ASM_CSKY_SWITCH_TO_H

#include <linux/thread_info.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
/*
 * Eager FPU switch: save the outgoing task's FPU state and load the
 * incoming task's before the integer context switch in switch_to.S.
 */
static inline void __switch_to_fpu(struct task_struct *prev,
				   struct task_struct *next)
{
	save_to_user_fp(&prev->thread.user_fp);
	restore_from_user_fp(&next->thread.user_fp);
}
#else
/* No hardware FPU: nothing to switch. */
static inline void __switch_to_fpu(struct task_struct *prev,
				   struct task_struct *next)
{}
#endif

/*
 * Context switching is now performed out-of-line in switch_to.S
 */
extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);

#define switch_to(prev, next, last)					\
	do {								\
		struct task_struct *__prev = (prev);			\
		struct task_struct *__next = (next);			\
		__switch_to_fpu(__prev, __next);			\
		((last) = __switch_to((prev), (next)));			\
	} while (0)

#endif /* __ASM_CSKY_SWITCH_TO_H */
+75
arch/csky/include/asm/thread_info.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef _ASM_CSKY_THREAD_INFO_H
#define _ASM_CSKY_THREAD_INFO_H

#ifndef __ASSEMBLY__

#include <linux/version.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Low-level per-thread data, located at the bottom of the kernel
 * stack.  NOTE(review): entry.S presumably accesses some of these
 * fields by fixed offset — keep the layout in sync with asm-offsets.
 */
struct thread_info {
	struct task_struct	*task;
	void			*dump_exec_domain;
	unsigned long		flags;
	int			preempt_count;
	unsigned long		tp_value;	/* thread pointer (TLS) */
	mm_segment_t		addr_limit;
	struct restart_block	restart_block;
	struct pt_regs		*regs;
	unsigned int		cpu;
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.cpu		= 0,			\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)

/* thread_info sits at the base of the THREAD_SIZE-aligned stack. */
static inline struct thread_info *current_thread_info(void)
{
	unsigned long sp;

	asm volatile("mov %0, sp\n":"=r"(sp));

	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

#endif /* !__ASSEMBLY__ */

/* entry.S relies on these definitions!
 * bits 0-5 are tested at every exception exit
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NOTIFY_RESUME	1       /* callback before returning to user */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SYSCALL_TRACE	5	/* syscall trace active */
#define TIF_DELAYED_TRACE	14	/* single step a syscall */
#define TIF_POLLING_NRFLAG	16	/* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE		18      /* is terminating due to OOM killer */
#define TIF_FREEZE		19	/* thread is freezing for suspend */
#define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
#define TIF_SECCOMP		21	/* secure computing */

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_DELAYED_TRACE	(1 << TIF_DELAYED_TRACE)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE		(1 << TIF_MEMDIE)
#define _TIF_FREEZE		(1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)

#endif /* _ASM_CSKY_THREAD_INFO_H */
+14
arch/csky/include/uapi/asm/sigcontext.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_SIGCONTEXT_H
#define __ASM_CSKY_SIGCONTEXT_H

#include <asm/ptrace.h>

/*
 * Machine context saved on the user signal stack (uapi — the layout
 * is ABI and must not change): full integer register frame plus the
 * FPU/vector state.
 */
struct sigcontext {
	struct pt_regs	sc_pt_regs;
	struct user_fp	sc_user_fp;
};

#endif /* __ASM_CSKY_SIGCONTEXT_H */
+136
arch/csky/kernel/process.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* Process/thread creation, core-dump helpers and the idle loop. */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

#include <asm/elf.h>
#include <abi/reg_ops.h>

struct cpuinfo_csky cpu_data[NR_CPUS];

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void){}

/*
 * Return saved PC from a blocked thread
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;

	/* r15 is the link register saved by __switch_to. */
	return sw->r15;
}

/*
 * Build the kernel stack for a new task: a switch_stack (consumed by
 * __switch_to) below the pt_regs the task will eventually return to
 * user space with.
 */
int copy_thread(unsigned long clone_flags,
		unsigned long usp,
		unsigned long kthread_arg,
		struct task_struct *p)
{
	struct switch_stack *childstack;
	struct pt_regs *childregs = task_pt_regs(p);

#ifdef CONFIG_CPU_HAS_FPU
	/* Snapshot the parent's live FPU state into the child. */
	save_to_user_fp(&p->thread.user_fp);
#endif

	childstack = ((struct switch_stack *) childregs) - 1;
	memset(childstack, 0, sizeof(struct switch_stack));

	/* setup ksp for switch_to !!! */
	p->thread.ksp = (unsigned long)childstack;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: jump to fn(arg) via ret_from_kernel_thread. */
		memset(childregs, 0, sizeof(struct pt_regs));
		childstack->r15 = (unsigned long) ret_from_kernel_thread;
		childstack->r8 = kthread_arg;
		childstack->r9 = usp;	/* r9 carries fn; r8 carries arg */
		childregs->sr = mfcr("psr");
	} else {
		/* User fork/clone: duplicate the parent's register frame. */
		*childregs = *(current_pt_regs());
		if (usp)
			childregs->usp = usp;
		if (clone_flags & CLONE_SETTLS)
			task_thread_info(p)->tp_value = childregs->tls
						      = childregs->regs[0];

		childregs->a0 = 0;	/* child sees fork() return 0 */
		childstack->r15 = (unsigned long) ret_from_fork;
	}

	return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
{
	memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

/* Copy a task's general registers into an ELF core-dump regset. */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	/* NOTE: usp is error value. */
	ELF_CORE_COPY_REGS((*pr_regs), regs)

	return 1;
}

/*
 * Walk a sleeping task's saved frames looking for the first PC outside
 * the scheduler, for /proc/<pid>/wchan.
 * NOTE(review): the 8184 bound presumably reflects THREAD_SIZE minus
 * frame overhead — confirm it tracks THREAD_SIZE.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long esp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	esp = p->thread.esp0;
	do {
		if (esp < stack_page+sizeof(struct task_struct) ||
		    esp >= 8184+stack_page)
			return 0;
		/*FIXME: There's may be error here!*/
		pc = ((unsigned long *)esp)[1];
		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;
		esp = *(unsigned long *) esp;
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);

#ifndef CONFIG_CPU_PM_NONE
/* Idle: enter the configured low-power state, then re-enable irqs. */
void arch_cpu_idle(void)
{
#ifdef CONFIG_CPU_PM_WAIT
	asm volatile("wait\n");
#endif

#ifdef CONFIG_CPU_PM_DOZE
	asm volatile("doze\n");
#endif

#ifdef CONFIG_CPU_PM_STOP
	asm volatile("stop\n");
#endif
	local_irq_enable();
}
#endif
+347
arch/csky/kernel/signal.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 + 4 + #include <linux/sched.h> 5 + #include <linux/mm.h> 6 + #include <linux/kernel.h> 7 + #include <linux/signal.h> 8 + #include <linux/syscalls.h> 9 + #include <linux/errno.h> 10 + #include <linux/wait.h> 11 + #include <linux/ptrace.h> 12 + #include <linux/unistd.h> 13 + #include <linux/stddef.h> 14 + #include <linux/highuid.h> 15 + #include <linux/personality.h> 16 + #include <linux/tty.h> 17 + #include <linux/binfmts.h> 18 + #include <linux/tracehook.h> 19 + #include <linux/freezer.h> 20 + #include <linux/uaccess.h> 21 + 22 + #include <asm/setup.h> 23 + #include <asm/pgtable.h> 24 + #include <asm/traps.h> 25 + #include <asm/ucontext.h> 26 + #include <asm/vdso.h> 27 + 28 + #include <abi/regdef.h> 29 + 30 + #ifdef CONFIG_CPU_HAS_FPU 31 + #include <abi/fpu.h> 32 + 33 + static int restore_fpu_state(struct sigcontext *sc) 34 + { 35 + int err = 0; 36 + struct user_fp user_fp; 37 + 38 + err = copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); 39 + 40 + restore_from_user_fp(&user_fp); 41 + 42 + return err; 43 + } 44 + 45 + static int save_fpu_state(struct sigcontext *sc) 46 + { 47 + struct user_fp user_fp; 48 + 49 + save_to_user_fp(&user_fp); 50 + 51 + return copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); 52 + } 53 + #else 54 + static inline int restore_fpu_state(struct sigcontext *sc) { return 0; } 55 + static inline int save_fpu_state(struct sigcontext *sc) { return 0; } 56 + #endif 57 + 58 + struct rt_sigframe { 59 + int sig; 60 + struct siginfo *pinfo; 61 + void *puc; 62 + struct siginfo info; 63 + struct ucontext uc; 64 + }; 65 + 66 + static int 67 + restore_sigframe(struct pt_regs *regs, 68 + struct sigcontext *sc, int *pr2) 69 + { 70 + int err = 0; 71 + 72 + /* Always make any pending restarted system calls return -EINTR */ 73 + current_thread_info()->task->restart_block.fn = do_no_restart_syscall; 74 + 75 + err |= copy_from_user(regs, 
&sc->sc_pt_regs, sizeof(struct pt_regs)); 76 + 77 + err |= restore_fpu_state(sc); 78 + 79 + *pr2 = regs->a0; 80 + return err; 81 + } 82 + 83 + asmlinkage int 84 + do_rt_sigreturn(void) 85 + { 86 + sigset_t set; 87 + int a0; 88 + struct pt_regs *regs = current_pt_regs(); 89 + struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp); 90 + 91 + if (verify_area(VERIFY_READ, frame, sizeof(*frame))) 92 + goto badframe; 93 + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 94 + goto badframe; 95 + 96 + sigdelsetmask(&set, (sigmask(SIGKILL) | sigmask(SIGSTOP))); 97 + spin_lock_irq(&current->sighand->siglock); 98 + current->blocked = set; 99 + recalc_sigpending(); 100 + spin_unlock_irq(&current->sighand->siglock); 101 + 102 + if (restore_sigframe(regs, &frame->uc.uc_mcontext, &a0)) 103 + goto badframe; 104 + 105 + return a0; 106 + 107 + badframe: 108 + force_sig(SIGSEGV, current); 109 + return 0; 110 + } 111 + 112 + static int setup_sigframe(struct sigcontext *sc, struct pt_regs *regs) 113 + { 114 + int err = 0; 115 + 116 + err |= copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); 117 + err |= save_fpu_state(sc); 118 + 119 + return err; 120 + } 121 + 122 + static inline void * 123 + get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 124 + { 125 + unsigned long usp; 126 + 127 + /* Default to using normal stack. */ 128 + usp = regs->usp; 129 + 130 + /* This is the X/Open sanctioned signal stack switching. 
*/ 131 + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(usp)) { 132 + if (!on_sig_stack(usp)) 133 + usp = current->sas_ss_sp + current->sas_ss_size; 134 + } 135 + return (void *)((usp - frame_size) & -8UL); 136 + } 137 + 138 + static int 139 + setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) 140 + { 141 + struct rt_sigframe *frame; 142 + int err = 0; 143 + 144 + struct csky_vdso *vdso = current->mm->context.vdso; 145 + 146 + frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); 147 + if (!frame) 148 + return 1; 149 + 150 + err |= __put_user(ksig->sig, &frame->sig); 151 + err |= __put_user(&frame->info, &frame->pinfo); 152 + err |= __put_user(&frame->uc, &frame->puc); 153 + err |= copy_siginfo_to_user(&frame->info, &ksig->info); 154 + 155 + /* Create the ucontext. */ 156 + err |= __put_user(0, &frame->uc.uc_flags); 157 + err |= __put_user(0, &frame->uc.uc_link); 158 + err |= __put_user((void *)current->sas_ss_sp, 159 + &frame->uc.uc_stack.ss_sp); 160 + err |= __put_user(sas_ss_flags(regs->usp), 161 + &frame->uc.uc_stack.ss_flags); 162 + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 163 + err |= setup_sigframe(&frame->uc.uc_mcontext, regs); 164 + err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 165 + 166 + if (err) 167 + goto give_sigsegv; 168 + 169 + /* Set up registers for signal handler */ 170 + regs->usp = (unsigned long)frame; 171 + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; 172 + regs->lr = (unsigned long)vdso->rt_signal_retcode; 173 + 174 + adjust_stack: 175 + regs->a0 = ksig->sig; /* first arg is signo */ 176 + regs->a1 = (unsigned long)(&(frame->info)); 177 + regs->a2 = (unsigned long)(&(frame->uc)); 178 + return err; 179 + 180 + give_sigsegv: 181 + if (ksig->sig == SIGSEGV) 182 + ksig->ka.sa.sa_handler = SIG_DFL; 183 + force_sig(SIGSEGV, current); 184 + goto adjust_stack; 185 + } 186 + 187 + /* 188 + * OK, we're invoking a handler 189 + */ 190 + static int 191 + handle_signal(struct 
ksignal *ksig, struct pt_regs *regs) 192 + { 193 + int ret; 194 + sigset_t *oldset = sigmask_to_save(); 195 + 196 + /* 197 + * set up the stack frame, regardless of SA_SIGINFO, 198 + * and pass info anyway. 199 + */ 200 + ret = setup_rt_frame(ksig, oldset, regs); 201 + 202 + if (ret != 0) { 203 + force_sigsegv(ksig->sig, current); 204 + return ret; 205 + } 206 + 207 + /* Block the signal if we were successful. */ 208 + spin_lock_irq(&current->sighand->siglock); 209 + sigorsets(&current->blocked, &current->blocked, &ksig->ka.sa.sa_mask); 210 + if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 211 + sigaddset(&current->blocked, ksig->sig); 212 + recalc_sigpending(); 213 + spin_unlock_irq(&current->sighand->siglock); 214 + 215 + return 0; 216 + } 217 + 218 + /* 219 + * Note that 'init' is a special process: it doesn't get signals it doesn't 220 + * want to handle. Thus you cannot kill init even with a SIGKILL even by 221 + * mistake. 222 + * 223 + * Note that we go through the signals twice: once to check the signals 224 + * that the kernel can handle, and then we build all the user-level signal 225 + * handling stack-frames in one go after that. 226 + */ 227 + static void do_signal(struct pt_regs *regs, int syscall) 228 + { 229 + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; 230 + struct ksignal ksig; 231 + 232 + /* 233 + * We want the common case to go fast, which 234 + * is why we may in certain cases get here from 235 + * kernel mode. Just return without doing anything 236 + * if so. 237 + */ 238 + if (!user_mode(regs)) 239 + return; 240 + 241 + current->thread.esp0 = (unsigned long)regs; 242 + 243 + /* 244 + * If we were from a system call, check for system call restarting... 245 + */ 246 + if (syscall) { 247 + continue_addr = regs->pc; 248 + #if defined(__CSKYABIV2__) 249 + restart_addr = continue_addr - 4; 250 + #else 251 + restart_addr = continue_addr - 2; 252 + #endif 253 + retval = regs->a0; 254 + 255 + /* 256 + * Prepare for system call restart. 
We do this here so that a 257 + * debugger will see the already changed. 258 + */ 259 + switch (retval) { 260 + case -ERESTARTNOHAND: 261 + case -ERESTARTSYS: 262 + case -ERESTARTNOINTR: 263 + regs->a0 = regs->orig_a0; 264 + regs->pc = restart_addr; 265 + break; 266 + case -ERESTART_RESTARTBLOCK: 267 + regs->a0 = -EINTR; 268 + break; 269 + } 270 + } 271 + 272 + if (try_to_freeze()) 273 + goto no_signal; 274 + 275 + /* 276 + * Get the signal to deliver. When running under ptrace, at this 277 + * point the debugger may change all our registers ... 278 + */ 279 + if (get_signal(&ksig)) { 280 + /* 281 + * Depending on the signal settings we may need to revert the 282 + * decision to restart the system call. But skip this if a 283 + * debugger has chosen to restart at a different PC. 284 + */ 285 + if (regs->pc == restart_addr) { 286 + if (retval == -ERESTARTNOHAND || 287 + (retval == -ERESTARTSYS && 288 + !(ksig.ka.sa.sa_flags & SA_RESTART))) { 289 + regs->a0 = -EINTR; 290 + regs->pc = continue_addr; 291 + } 292 + } 293 + 294 + /* Whee! Actually deliver the signal. */ 295 + if (handle_signal(&ksig, regs) == 0) { 296 + /* 297 + * A signal was successfully delivered; the saved 298 + * sigmask will have been stored in the signal frame, 299 + * and will be restored by sigreturn, so we can simply 300 + * clear the TIF_RESTORE_SIGMASK flag. 301 + */ 302 + if (test_thread_flag(TIF_RESTORE_SIGMASK)) 303 + clear_thread_flag(TIF_RESTORE_SIGMASK); 304 + } 305 + return; 306 + } 307 + 308 + no_signal: 309 + if (syscall) { 310 + /* 311 + * Handle restarting a different system call. As above, 312 + * if a debugger has chosen to restart at a different PC, 313 + * ignore the restart. 
314 + */ 315 + if (retval == -ERESTART_RESTARTBLOCK 316 + && regs->pc == continue_addr) { 317 + #if defined(__CSKYABIV2__) 318 + regs->regs[3] = __NR_restart_syscall; 319 + regs->pc -= 4; 320 + #else 321 + regs->regs[9] = __NR_restart_syscall; 322 + regs->pc -= 2; 323 + #endif 324 + } 325 + 326 + /* 327 + * If there's no signal to deliver, we just put the saved 328 + * sigmask back. 329 + */ 330 + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 331 + clear_thread_flag(TIF_RESTORE_SIGMASK); 332 + sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 333 + } 334 + } 335 + } 336 + 337 + asmlinkage void 338 + do_notify_resume(unsigned int thread_flags, struct pt_regs *regs, int syscall) 339 + { 340 + if (thread_flags & _TIF_SIGPENDING) 341 + do_signal(regs, syscall); 342 + 343 + if (thread_flags & _TIF_NOTIFY_RESUME) { 344 + clear_thread_flag(TIF_NOTIFY_RESUME); 345 + tracehook_notify_resume(regs); 346 + } 347 + }
+11
arch/csky/kernel/time.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/clk-provider.h>
#include <linux/clocksource.h>

/*
 * Arch time init: register all devicetree-declared clocks, then probe
 * the devicetree-declared clocksource/clockevent timers.
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	timer_probe();
}