Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RISC-V: Task implementation

This patch contains the implementation of tasks on RISC-V, most of which
is involved in task switching.

Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>

+1243
+1
arch/riscv/include/asm/asm-offsets.h
··· 1 + #include <generated/asm-offsets.h>
+45
arch/riscv/include/asm/current.h
··· 1 + /* 2 + * Based on arm/arm64/include/asm/current.h 3 + * 4 + * Copyright (C) 2016 ARM 5 + * Copyright (C) 2017 SiFive 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation, version 2. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + 18 + #ifndef __ASM_CURRENT_H 19 + #define __ASM_CURRENT_H 20 + 21 + #include <linux/bug.h> 22 + #include <linux/compiler.h> 23 + 24 + #ifndef __ASSEMBLY__ 25 + 26 + struct task_struct; 27 + 28 + /* 29 + * This only works because "struct thread_info" is at offset 0 from "struct 30 + * task_struct". This constraint seems to be necessary on other architectures 31 + * as well, but __switch_to enforces it. We can't check TASK_TI here because 32 + * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct 33 + * task_struct" here due to some header ordering problems. 34 + */ 35 + static __always_inline struct task_struct *get_current(void) 36 + { 37 + register struct task_struct *tp __asm__("tp"); 38 + return tp; 39 + } 40 + 41 + #define current get_current() 42 + 43 + #endif /* __ASSEMBLY__ */ 44 + 45 + #endif /* __ASM_CURRENT_H */
+22
arch/riscv/include/asm/kprobes.h
··· 1 + /* 2 + * Copied from arch/arm64/include/asm/kprobes.h 3 + * 4 + * Copyright (C) 2013 Linaro Limited 5 + * Copyright (C) 2017 SiFive 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + * General Public License for more details. 15 + */ 16 + 17 + #ifndef _RISCV_KPROBES_H 18 + #define _RISCV_KPROBES_H 19 + 20 + #include <asm-generic/kprobes.h> 21 + 22 + #endif /* _RISCV_KPROBES_H */
+97
arch/riscv/include/asm/processor.h
··· 1 + /* 2 + * Copyright (C) 2012 Regents of the University of California 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #ifndef _ASM_RISCV_PROCESSOR_H 15 + #define _ASM_RISCV_PROCESSOR_H 16 + 17 + #include <linux/const.h> 18 + 19 + #include <asm/ptrace.h> 20 + 21 + /* 22 + * This decides where the kernel will search for a free chunk of vm 23 + * space during mmap's. 24 + */ 25 + #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) 26 + 27 + #define STACK_TOP TASK_SIZE 28 + #define STACK_TOP_MAX STACK_TOP 29 + #define STACK_ALIGN 16 30 + 31 + #ifndef __ASSEMBLY__ 32 + 33 + struct task_struct; 34 + struct pt_regs; 35 + 36 + /* 37 + * Default implementation of macro that returns current 38 + * instruction pointer ("program counter"). 39 + */ 40 + #define current_text_addr() ({ __label__ _l; _l: &&_l; }) 41 + 42 + /* CPU-specific state of a task */ 43 + struct thread_struct { 44 + /* Callee-saved registers */ 45 + unsigned long ra; 46 + unsigned long sp; /* Kernel mode stack */ 47 + unsigned long s[12]; /* s[0]: frame pointer */ 48 + struct __riscv_d_ext_state fstate; 49 + }; 50 + 51 + #define INIT_THREAD { \ 52 + .sp = sizeof(init_stack) + (long)&init_stack, \ 53 + } 54 + 55 + #define task_pt_regs(tsk) \ 56 + ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \ 57 + - ALIGN(sizeof(struct pt_regs), STACK_ALIGN))) 58 + 59 + #define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc) 60 + #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) 61 + 62 + 63 + /* Do necessary setup to start up a newly executed thread. 
*/ 64 + extern void start_thread(struct pt_regs *regs, 65 + unsigned long pc, unsigned long sp); 66 + 67 + /* Free all resources held by a thread. */ 68 + static inline void release_thread(struct task_struct *dead_task) 69 + { 70 + } 71 + 72 + extern unsigned long get_wchan(struct task_struct *p); 73 + 74 + 75 + static inline void cpu_relax(void) 76 + { 77 + #ifdef __riscv_muldiv 78 + int dummy; 79 + /* In lieu of a halt instruction, induce a long-latency stall. */ 80 + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); 81 + #endif 82 + barrier(); 83 + } 84 + 85 + static inline void wait_for_interrupt(void) 86 + { 87 + __asm__ __volatile__ ("wfi"); 88 + } 89 + 90 + struct device_node; 91 + extern int riscv_of_processor_hart(struct device_node *node); 92 + 93 + extern void riscv_fill_hwcap(void); 94 + 95 + #endif /* __ASSEMBLY__ */ 96 + 97 + #endif /* _ASM_RISCV_PROCESSOR_H */
+69
arch/riscv/include/asm/switch_to.h
··· 1 + /* 2 + * Copyright (C) 2012 Regents of the University of California 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #ifndef _ASM_RISCV_SWITCH_TO_H 15 + #define _ASM_RISCV_SWITCH_TO_H 16 + 17 + #include <asm/processor.h> 18 + #include <asm/ptrace.h> 19 + #include <asm/csr.h> 20 + 21 + extern void __fstate_save(struct task_struct *save_to); 22 + extern void __fstate_restore(struct task_struct *restore_from); 23 + 24 + static inline void __fstate_clean(struct pt_regs *regs) 25 + { 26 + regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; 27 + } 28 + 29 + static inline void fstate_save(struct task_struct *task, 30 + struct pt_regs *regs) 31 + { 32 + if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) { 33 + __fstate_save(task); 34 + __fstate_clean(regs); 35 + } 36 + } 37 + 38 + static inline void fstate_restore(struct task_struct *task, 39 + struct pt_regs *regs) 40 + { 41 + if ((regs->sstatus & SR_FS) != SR_FS_OFF) { 42 + __fstate_restore(task); 43 + __fstate_clean(regs); 44 + } 45 + } 46 + 47 + static inline void __switch_to_aux(struct task_struct *prev, 48 + struct task_struct *next) 49 + { 50 + struct pt_regs *regs; 51 + 52 + regs = task_pt_regs(prev); 53 + if (unlikely(regs->sstatus & SR_SD)) 54 + fstate_save(prev, regs); 55 + fstate_restore(next, task_pt_regs(next)); 56 + } 57 + 58 + extern struct task_struct *__switch_to(struct task_struct *, 59 + struct task_struct *); 60 + 61 + #define switch_to(prev, next, last) \ 62 + do { \ 63 + struct task_struct *__prev = (prev); \ 64 + struct task_struct *__next = (next); \ 65 + 
__switch_to_aux(__prev, __next); \ 66 + ((last) = __switch_to(__prev, __next)); \ 67 + } while (0) 68 + 69 + #endif /* _ASM_RISCV_SWITCH_TO_H */
+94
arch/riscv/include/asm/thread_info.h
··· 1 + /* 2 + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> 3 + * Copyright (C) 2012 Regents of the University of California 4 + * Copyright (C) 2017 SiFive 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation, version 2. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + 16 + #ifndef _ASM_RISCV_THREAD_INFO_H 17 + #define _ASM_RISCV_THREAD_INFO_H 18 + 19 + #include <asm/page.h> 20 + #include <linux/const.h> 21 + 22 + /* thread information allocation */ 23 + #define THREAD_SIZE_ORDER (1) 24 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 25 + 26 + #ifndef __ASSEMBLY__ 27 + 28 + #include <asm/processor.h> 29 + #include <asm/csr.h> 30 + 31 + typedef unsigned long mm_segment_t; 32 + 33 + /* 34 + * low level task data that entry.S needs immediate access to 35 + * - this struct should fit entirely inside of one cache line 36 + * - if the members of this struct changes, the assembly constants 37 + * in asm-offsets.c must be updated accordingly 38 + * - thread_info is included in task_struct at an offset of 0. This means that 39 + * tp points to both thread_info and task_struct. 40 + */ 41 + struct thread_info { 42 + unsigned long flags; /* low level flags */ 43 + int preempt_count; /* 0=>preemptible, <0=>BUG */ 44 + mm_segment_t addr_limit; 45 + /* 46 + * These stack pointers are overwritten on every system call or 47 + * exception. SP is also saved to the stack it can be recovered when 48 + * overwritten. 
49 + */ 50 + long kernel_sp; /* Kernel stack pointer */ 51 + long user_sp; /* User stack pointer */ 52 + int cpu; 53 + }; 54 + 55 + /* 56 + * macros/functions for gaining access to the thread information structure 57 + * 58 + * preempt_count needs to be 1 initially, until the scheduler is functional. 59 + */ 60 + #define INIT_THREAD_INFO(tsk) \ 61 + { \ 62 + .flags = 0, \ 63 + .preempt_count = INIT_PREEMPT_COUNT, \ 64 + .addr_limit = KERNEL_DS, \ 65 + } 66 + 67 + #define init_stack (init_thread_union.stack) 68 + 69 + #endif /* !__ASSEMBLY__ */ 70 + 71 + /* 72 + * thread information flags 73 + * - these are process state flags that various assembly files may need to 74 + * access 75 + * - pending work-to-be-done flags are in lowest half-word 76 + * - other flags in upper half-word(s) 77 + */ 78 + #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 79 + #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 80 + #define TIF_SIGPENDING 2 /* signal pending */ 81 + #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 82 + #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ 83 + #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 84 + #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 85 + 86 + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 87 + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 88 + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 89 + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 90 + 91 + #define _TIF_WORK_MASK \ 92 + (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) 93 + 94 + #endif /* _ASM_RISCV_THREAD_INFO_H */
+322
arch/riscv/kernel/asm-offsets.c
··· 1 + /* 2 + * Copyright (C) 2012 Regents of the University of California 3 + * Copyright (C) 2017 SiFive 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License 7 + * as published by the Free Software Foundation, version 2. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #define GENERATING_ASM_OFFSETS 16 + 17 + #include <linux/kbuild.h> 18 + #include <linux/sched.h> 19 + #include <asm/thread_info.h> 20 + #include <asm/ptrace.h> 21 + 22 + void asm_offsets(void) 23 + { 24 + OFFSET(TASK_THREAD_RA, task_struct, thread.ra); 25 + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); 26 + OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); 27 + OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); 28 + OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); 29 + OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); 30 + OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); 31 + OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); 32 + OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); 33 + OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]); 34 + OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]); 35 + OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]); 36 + OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]); 37 + OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]); 38 + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); 39 + OFFSET(TASK_STACK, task_struct, stack); 40 + OFFSET(TASK_TI, task_struct, thread_info); 41 + OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 42 + OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 43 + OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 44 + OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); 45 + 46 + OFFSET(TASK_THREAD_F0, 
task_struct, thread.fstate.f[0]); 47 + OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]); 48 + OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]); 49 + OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]); 50 + OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]); 51 + OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]); 52 + OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]); 53 + OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]); 54 + OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]); 55 + OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]); 56 + OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]); 57 + OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]); 58 + OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]); 59 + OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]); 60 + OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]); 61 + OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]); 62 + OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]); 63 + OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]); 64 + OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]); 65 + OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]); 66 + OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]); 67 + OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]); 68 + OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]); 69 + OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]); 70 + OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]); 71 + OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]); 72 + OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]); 73 + OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]); 74 + OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]); 75 + OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]); 76 + OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]); 77 + OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]); 
78 + OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr); 79 + 80 + DEFINE(PT_SIZE, sizeof(struct pt_regs)); 81 + OFFSET(PT_SEPC, pt_regs, sepc); 82 + OFFSET(PT_RA, pt_regs, ra); 83 + OFFSET(PT_FP, pt_regs, s0); 84 + OFFSET(PT_S0, pt_regs, s0); 85 + OFFSET(PT_S1, pt_regs, s1); 86 + OFFSET(PT_S2, pt_regs, s2); 87 + OFFSET(PT_S3, pt_regs, s3); 88 + OFFSET(PT_S4, pt_regs, s4); 89 + OFFSET(PT_S5, pt_regs, s5); 90 + OFFSET(PT_S6, pt_regs, s6); 91 + OFFSET(PT_S7, pt_regs, s7); 92 + OFFSET(PT_S8, pt_regs, s8); 93 + OFFSET(PT_S9, pt_regs, s9); 94 + OFFSET(PT_S10, pt_regs, s10); 95 + OFFSET(PT_S11, pt_regs, s11); 96 + OFFSET(PT_SP, pt_regs, sp); 97 + OFFSET(PT_TP, pt_regs, tp); 98 + OFFSET(PT_A0, pt_regs, a0); 99 + OFFSET(PT_A1, pt_regs, a1); 100 + OFFSET(PT_A2, pt_regs, a2); 101 + OFFSET(PT_A3, pt_regs, a3); 102 + OFFSET(PT_A4, pt_regs, a4); 103 + OFFSET(PT_A5, pt_regs, a5); 104 + OFFSET(PT_A6, pt_regs, a6); 105 + OFFSET(PT_A7, pt_regs, a7); 106 + OFFSET(PT_T0, pt_regs, t0); 107 + OFFSET(PT_T1, pt_regs, t1); 108 + OFFSET(PT_T2, pt_regs, t2); 109 + OFFSET(PT_T3, pt_regs, t3); 110 + OFFSET(PT_T4, pt_regs, t4); 111 + OFFSET(PT_T5, pt_regs, t5); 112 + OFFSET(PT_T6, pt_regs, t6); 113 + OFFSET(PT_GP, pt_regs, gp); 114 + OFFSET(PT_ORIG_A0, pt_regs, orig_a0); 115 + OFFSET(PT_SSTATUS, pt_regs, sstatus); 116 + OFFSET(PT_SBADADDR, pt_regs, sbadaddr); 117 + OFFSET(PT_SCAUSE, pt_regs, scause); 118 + 119 + /* 120 + * THREAD_{F,X}* might be larger than a S-type offset can handle, but 121 + * these are used in performance-sensitive assembly so we can't resort 122 + * to loading the long immediate every time. 
123 + */ 124 + DEFINE(TASK_THREAD_RA_RA, 125 + offsetof(struct task_struct, thread.ra) 126 + - offsetof(struct task_struct, thread.ra) 127 + ); 128 + DEFINE(TASK_THREAD_SP_RA, 129 + offsetof(struct task_struct, thread.sp) 130 + - offsetof(struct task_struct, thread.ra) 131 + ); 132 + DEFINE(TASK_THREAD_S0_RA, 133 + offsetof(struct task_struct, thread.s[0]) 134 + - offsetof(struct task_struct, thread.ra) 135 + ); 136 + DEFINE(TASK_THREAD_S1_RA, 137 + offsetof(struct task_struct, thread.s[1]) 138 + - offsetof(struct task_struct, thread.ra) 139 + ); 140 + DEFINE(TASK_THREAD_S2_RA, 141 + offsetof(struct task_struct, thread.s[2]) 142 + - offsetof(struct task_struct, thread.ra) 143 + ); 144 + DEFINE(TASK_THREAD_S3_RA, 145 + offsetof(struct task_struct, thread.s[3]) 146 + - offsetof(struct task_struct, thread.ra) 147 + ); 148 + DEFINE(TASK_THREAD_S4_RA, 149 + offsetof(struct task_struct, thread.s[4]) 150 + - offsetof(struct task_struct, thread.ra) 151 + ); 152 + DEFINE(TASK_THREAD_S5_RA, 153 + offsetof(struct task_struct, thread.s[5]) 154 + - offsetof(struct task_struct, thread.ra) 155 + ); 156 + DEFINE(TASK_THREAD_S6_RA, 157 + offsetof(struct task_struct, thread.s[6]) 158 + - offsetof(struct task_struct, thread.ra) 159 + ); 160 + DEFINE(TASK_THREAD_S7_RA, 161 + offsetof(struct task_struct, thread.s[7]) 162 + - offsetof(struct task_struct, thread.ra) 163 + ); 164 + DEFINE(TASK_THREAD_S8_RA, 165 + offsetof(struct task_struct, thread.s[8]) 166 + - offsetof(struct task_struct, thread.ra) 167 + ); 168 + DEFINE(TASK_THREAD_S9_RA, 169 + offsetof(struct task_struct, thread.s[9]) 170 + - offsetof(struct task_struct, thread.ra) 171 + ); 172 + DEFINE(TASK_THREAD_S10_RA, 173 + offsetof(struct task_struct, thread.s[10]) 174 + - offsetof(struct task_struct, thread.ra) 175 + ); 176 + DEFINE(TASK_THREAD_S11_RA, 177 + offsetof(struct task_struct, thread.s[11]) 178 + - offsetof(struct task_struct, thread.ra) 179 + ); 180 + 181 + DEFINE(TASK_THREAD_F0_F0, 182 + offsetof(struct task_struct, 
thread.fstate.f[0]) 183 + - offsetof(struct task_struct, thread.fstate.f[0]) 184 + ); 185 + DEFINE(TASK_THREAD_F1_F0, 186 + offsetof(struct task_struct, thread.fstate.f[1]) 187 + - offsetof(struct task_struct, thread.fstate.f[0]) 188 + ); 189 + DEFINE(TASK_THREAD_F2_F0, 190 + offsetof(struct task_struct, thread.fstate.f[2]) 191 + - offsetof(struct task_struct, thread.fstate.f[0]) 192 + ); 193 + DEFINE(TASK_THREAD_F3_F0, 194 + offsetof(struct task_struct, thread.fstate.f[3]) 195 + - offsetof(struct task_struct, thread.fstate.f[0]) 196 + ); 197 + DEFINE(TASK_THREAD_F4_F0, 198 + offsetof(struct task_struct, thread.fstate.f[4]) 199 + - offsetof(struct task_struct, thread.fstate.f[0]) 200 + ); 201 + DEFINE(TASK_THREAD_F5_F0, 202 + offsetof(struct task_struct, thread.fstate.f[5]) 203 + - offsetof(struct task_struct, thread.fstate.f[0]) 204 + ); 205 + DEFINE(TASK_THREAD_F6_F0, 206 + offsetof(struct task_struct, thread.fstate.f[6]) 207 + - offsetof(struct task_struct, thread.fstate.f[0]) 208 + ); 209 + DEFINE(TASK_THREAD_F7_F0, 210 + offsetof(struct task_struct, thread.fstate.f[7]) 211 + - offsetof(struct task_struct, thread.fstate.f[0]) 212 + ); 213 + DEFINE(TASK_THREAD_F8_F0, 214 + offsetof(struct task_struct, thread.fstate.f[8]) 215 + - offsetof(struct task_struct, thread.fstate.f[0]) 216 + ); 217 + DEFINE(TASK_THREAD_F9_F0, 218 + offsetof(struct task_struct, thread.fstate.f[9]) 219 + - offsetof(struct task_struct, thread.fstate.f[0]) 220 + ); 221 + DEFINE(TASK_THREAD_F10_F0, 222 + offsetof(struct task_struct, thread.fstate.f[10]) 223 + - offsetof(struct task_struct, thread.fstate.f[0]) 224 + ); 225 + DEFINE(TASK_THREAD_F11_F0, 226 + offsetof(struct task_struct, thread.fstate.f[11]) 227 + - offsetof(struct task_struct, thread.fstate.f[0]) 228 + ); 229 + DEFINE(TASK_THREAD_F12_F0, 230 + offsetof(struct task_struct, thread.fstate.f[12]) 231 + - offsetof(struct task_struct, thread.fstate.f[0]) 232 + ); 233 + DEFINE(TASK_THREAD_F13_F0, 234 + offsetof(struct task_struct, 
thread.fstate.f[13]) 235 + - offsetof(struct task_struct, thread.fstate.f[0]) 236 + ); 237 + DEFINE(TASK_THREAD_F14_F0, 238 + offsetof(struct task_struct, thread.fstate.f[14]) 239 + - offsetof(struct task_struct, thread.fstate.f[0]) 240 + ); 241 + DEFINE(TASK_THREAD_F15_F0, 242 + offsetof(struct task_struct, thread.fstate.f[15]) 243 + - offsetof(struct task_struct, thread.fstate.f[0]) 244 + ); 245 + DEFINE(TASK_THREAD_F16_F0, 246 + offsetof(struct task_struct, thread.fstate.f[16]) 247 + - offsetof(struct task_struct, thread.fstate.f[0]) 248 + ); 249 + DEFINE(TASK_THREAD_F17_F0, 250 + offsetof(struct task_struct, thread.fstate.f[17]) 251 + - offsetof(struct task_struct, thread.fstate.f[0]) 252 + ); 253 + DEFINE(TASK_THREAD_F18_F0, 254 + offsetof(struct task_struct, thread.fstate.f[18]) 255 + - offsetof(struct task_struct, thread.fstate.f[0]) 256 + ); 257 + DEFINE(TASK_THREAD_F19_F0, 258 + offsetof(struct task_struct, thread.fstate.f[19]) 259 + - offsetof(struct task_struct, thread.fstate.f[0]) 260 + ); 261 + DEFINE(TASK_THREAD_F20_F0, 262 + offsetof(struct task_struct, thread.fstate.f[20]) 263 + - offsetof(struct task_struct, thread.fstate.f[0]) 264 + ); 265 + DEFINE(TASK_THREAD_F21_F0, 266 + offsetof(struct task_struct, thread.fstate.f[21]) 267 + - offsetof(struct task_struct, thread.fstate.f[0]) 268 + ); 269 + DEFINE(TASK_THREAD_F22_F0, 270 + offsetof(struct task_struct, thread.fstate.f[22]) 271 + - offsetof(struct task_struct, thread.fstate.f[0]) 272 + ); 273 + DEFINE(TASK_THREAD_F23_F0, 274 + offsetof(struct task_struct, thread.fstate.f[23]) 275 + - offsetof(struct task_struct, thread.fstate.f[0]) 276 + ); 277 + DEFINE(TASK_THREAD_F24_F0, 278 + offsetof(struct task_struct, thread.fstate.f[24]) 279 + - offsetof(struct task_struct, thread.fstate.f[0]) 280 + ); 281 + DEFINE(TASK_THREAD_F25_F0, 282 + offsetof(struct task_struct, thread.fstate.f[25]) 283 + - offsetof(struct task_struct, thread.fstate.f[0]) 284 + ); 285 + DEFINE(TASK_THREAD_F26_F0, 286 + 
offsetof(struct task_struct, thread.fstate.f[26]) 287 + - offsetof(struct task_struct, thread.fstate.f[0]) 288 + ); 289 + DEFINE(TASK_THREAD_F27_F0, 290 + offsetof(struct task_struct, thread.fstate.f[27]) 291 + - offsetof(struct task_struct, thread.fstate.f[0]) 292 + ); 293 + DEFINE(TASK_THREAD_F28_F0, 294 + offsetof(struct task_struct, thread.fstate.f[28]) 295 + - offsetof(struct task_struct, thread.fstate.f[0]) 296 + ); 297 + DEFINE(TASK_THREAD_F29_F0, 298 + offsetof(struct task_struct, thread.fstate.f[29]) 299 + - offsetof(struct task_struct, thread.fstate.f[0]) 300 + ); 301 + DEFINE(TASK_THREAD_F30_F0, 302 + offsetof(struct task_struct, thread.fstate.f[30]) 303 + - offsetof(struct task_struct, thread.fstate.f[0]) 304 + ); 305 + DEFINE(TASK_THREAD_F31_F0, 306 + offsetof(struct task_struct, thread.fstate.f[31]) 307 + - offsetof(struct task_struct, thread.fstate.f[0]) 308 + ); 309 + DEFINE(TASK_THREAD_FCSR_F0, 310 + offsetof(struct task_struct, thread.fstate.fcsr) 311 + - offsetof(struct task_struct, thread.fstate.f[0]) 312 + ); 313 + 314 + /* The assembler needs access to THREAD_SIZE as well. */ 315 + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); 316 + 317 + /* 318 + * We allocate a pt_regs on the stack when entering the kernel. This 319 + * ensures the alignment is sane. 320 + */ 321 + DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN)); 322 + }
+464
arch/riscv/kernel/entry.S
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, sscratch
	 * will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, sscratch, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, sscratch
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable the FPU to detect illegal usage of
	 * floating point in kernel space.
	 */
	li t0, SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, sstatus, t0
	csrr s2, sepc
	csrr s3, sbadaddr
	csrr s4, scause
	csrr s5, sscratch
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_SSTATUS(sp)
	REG_S s2, PT_SEPC(sp)
	REG_S s3, PT_SBADADDR(sp)
	REG_S s4, PT_SCAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm
/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_SSTATUS(sp)
	REG_L a2, PT_SEPC(sp)
	csrw sstatus, a0
	csrw sepc, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Restore SP last: it addressed everything above. */
	REG_L x2,  PT_SP(sp)
	.endm

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set sscratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw sscratch, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions.
	 */
	bge s4, zero, 1f

	/* Handle interrupts: strip the interrupt bit from scause. */
	slli a0, s4, 1
	srli a0, a0, 1
	move a1, sp /* pt_regs */
	tail do_IRQ
1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret.
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_SEPC(sp)
	/* System calls run with interrupts enabled */
	csrs sstatus, SR_IE
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_TRACE
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/* Syscall number held in a7 */
	bgeu a7, t0, 1f
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_TRACE
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
	csrc sstatus, SR_IE
	andi s0, s0, SR_PS
	bnez s0, restore_all

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into sscratch, so we can find the kernel data structures
	 * again on the next trap.
	 */
	csrw sscratch, tp

restore_all:
	RESTORE_ALL
	sret

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
308 + */ 309 + ENTRY(__switch_to) 310 + /* Save context into prev->thread */ 311 + li a4, TASK_THREAD_RA 312 + add a3, a0, a4 313 + add a4, a1, a4 314 + REG_S ra, TASK_THREAD_RA_RA(a3) 315 + REG_S sp, TASK_THREAD_SP_RA(a3) 316 + REG_S s0, TASK_THREAD_S0_RA(a3) 317 + REG_S s1, TASK_THREAD_S1_RA(a3) 318 + REG_S s2, TASK_THREAD_S2_RA(a3) 319 + REG_S s3, TASK_THREAD_S3_RA(a3) 320 + REG_S s4, TASK_THREAD_S4_RA(a3) 321 + REG_S s5, TASK_THREAD_S5_RA(a3) 322 + REG_S s6, TASK_THREAD_S6_RA(a3) 323 + REG_S s7, TASK_THREAD_S7_RA(a3) 324 + REG_S s8, TASK_THREAD_S8_RA(a3) 325 + REG_S s9, TASK_THREAD_S9_RA(a3) 326 + REG_S s10, TASK_THREAD_S10_RA(a3) 327 + REG_S s11, TASK_THREAD_S11_RA(a3) 328 + /* Restore context from next->thread */ 329 + REG_L ra, TASK_THREAD_RA_RA(a4) 330 + REG_L sp, TASK_THREAD_SP_RA(a4) 331 + REG_L s0, TASK_THREAD_S0_RA(a4) 332 + REG_L s1, TASK_THREAD_S1_RA(a4) 333 + REG_L s2, TASK_THREAD_S2_RA(a4) 334 + REG_L s3, TASK_THREAD_S3_RA(a4) 335 + REG_L s4, TASK_THREAD_S4_RA(a4) 336 + REG_L s5, TASK_THREAD_S5_RA(a4) 337 + REG_L s6, TASK_THREAD_S6_RA(a4) 338 + REG_L s7, TASK_THREAD_S7_RA(a4) 339 + REG_L s8, TASK_THREAD_S8_RA(a4) 340 + REG_L s9, TASK_THREAD_S9_RA(a4) 341 + REG_L s10, TASK_THREAD_S10_RA(a4) 342 + REG_L s11, TASK_THREAD_S11_RA(a4) 343 + /* Swap the CPU entry around. */ 344 + lw a3, TASK_TI_CPU(a0) 345 + lw a4, TASK_TI_CPU(a1) 346 + sw a3, TASK_TI_CPU(a1) 347 + sw a4, TASK_TI_CPU(a0) 348 + #if TASK_TI != 0 349 + #error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work." 
350 + addi tp, a1, TASK_TI 351 + #else 352 + move tp, a1 353 + #endif 354 + ret 355 + ENDPROC(__switch_to) 356 + 357 + ENTRY(__fstate_save) 358 + li a2, TASK_THREAD_F0 359 + add a0, a0, a2 360 + li t1, SR_FS 361 + csrs sstatus, t1 362 + frcsr t0 363 + fsd f0, TASK_THREAD_F0_F0(a0) 364 + fsd f1, TASK_THREAD_F1_F0(a0) 365 + fsd f2, TASK_THREAD_F2_F0(a0) 366 + fsd f3, TASK_THREAD_F3_F0(a0) 367 + fsd f4, TASK_THREAD_F4_F0(a0) 368 + fsd f5, TASK_THREAD_F5_F0(a0) 369 + fsd f6, TASK_THREAD_F6_F0(a0) 370 + fsd f7, TASK_THREAD_F7_F0(a0) 371 + fsd f8, TASK_THREAD_F8_F0(a0) 372 + fsd f9, TASK_THREAD_F9_F0(a0) 373 + fsd f10, TASK_THREAD_F10_F0(a0) 374 + fsd f11, TASK_THREAD_F11_F0(a0) 375 + fsd f12, TASK_THREAD_F12_F0(a0) 376 + fsd f13, TASK_THREAD_F13_F0(a0) 377 + fsd f14, TASK_THREAD_F14_F0(a0) 378 + fsd f15, TASK_THREAD_F15_F0(a0) 379 + fsd f16, TASK_THREAD_F16_F0(a0) 380 + fsd f17, TASK_THREAD_F17_F0(a0) 381 + fsd f18, TASK_THREAD_F18_F0(a0) 382 + fsd f19, TASK_THREAD_F19_F0(a0) 383 + fsd f20, TASK_THREAD_F20_F0(a0) 384 + fsd f21, TASK_THREAD_F21_F0(a0) 385 + fsd f22, TASK_THREAD_F22_F0(a0) 386 + fsd f23, TASK_THREAD_F23_F0(a0) 387 + fsd f24, TASK_THREAD_F24_F0(a0) 388 + fsd f25, TASK_THREAD_F25_F0(a0) 389 + fsd f26, TASK_THREAD_F26_F0(a0) 390 + fsd f27, TASK_THREAD_F27_F0(a0) 391 + fsd f28, TASK_THREAD_F28_F0(a0) 392 + fsd f29, TASK_THREAD_F29_F0(a0) 393 + fsd f30, TASK_THREAD_F30_F0(a0) 394 + fsd f31, TASK_THREAD_F31_F0(a0) 395 + sw t0, TASK_THREAD_FCSR_F0(a0) 396 + csrc sstatus, t1 397 + ret 398 + ENDPROC(__fstate_save) 399 + 400 + ENTRY(__fstate_restore) 401 + li a2, TASK_THREAD_F0 402 + add a0, a0, a2 403 + li t1, SR_FS 404 + lw t0, TASK_THREAD_FCSR_F0(a0) 405 + csrs sstatus, t1 406 + fld f0, TASK_THREAD_F0_F0(a0) 407 + fld f1, TASK_THREAD_F1_F0(a0) 408 + fld f2, TASK_THREAD_F2_F0(a0) 409 + fld f3, TASK_THREAD_F3_F0(a0) 410 + fld f4, TASK_THREAD_F4_F0(a0) 411 + fld f5, TASK_THREAD_F5_F0(a0) 412 + fld f6, TASK_THREAD_F6_F0(a0) 413 + fld f7, TASK_THREAD_F7_F0(a0) 414 + 
fld f8, TASK_THREAD_F8_F0(a0) 415 + fld f9, TASK_THREAD_F9_F0(a0) 416 + fld f10, TASK_THREAD_F10_F0(a0) 417 + fld f11, TASK_THREAD_F11_F0(a0) 418 + fld f12, TASK_THREAD_F12_F0(a0) 419 + fld f13, TASK_THREAD_F13_F0(a0) 420 + fld f14, TASK_THREAD_F14_F0(a0) 421 + fld f15, TASK_THREAD_F15_F0(a0) 422 + fld f16, TASK_THREAD_F16_F0(a0) 423 + fld f17, TASK_THREAD_F17_F0(a0) 424 + fld f18, TASK_THREAD_F18_F0(a0) 425 + fld f19, TASK_THREAD_F19_F0(a0) 426 + fld f20, TASK_THREAD_F20_F0(a0) 427 + fld f21, TASK_THREAD_F21_F0(a0) 428 + fld f22, TASK_THREAD_F22_F0(a0) 429 + fld f23, TASK_THREAD_F23_F0(a0) 430 + fld f24, TASK_THREAD_F24_F0(a0) 431 + fld f25, TASK_THREAD_F25_F0(a0) 432 + fld f26, TASK_THREAD_F26_F0(a0) 433 + fld f27, TASK_THREAD_F27_F0(a0) 434 + fld f28, TASK_THREAD_F28_F0(a0) 435 + fld f29, TASK_THREAD_F29_F0(a0) 436 + fld f30, TASK_THREAD_F30_F0(a0) 437 + fld f31, TASK_THREAD_F31_F0(a0) 438 + fscsr t0 439 + csrc sstatus, t1 440 + ret 441 + ENDPROC(__fstate_restore) 442 + 443 + 444 + .section ".rodata" 445 + /* Exception vector table */ 446 + ENTRY(excp_vect_table) 447 + RISCV_PTR do_trap_insn_misaligned 448 + RISCV_PTR do_trap_insn_fault 449 + RISCV_PTR do_trap_insn_illegal 450 + RISCV_PTR do_trap_break 451 + RISCV_PTR do_trap_load_misaligned 452 + RISCV_PTR do_trap_load_fault 453 + RISCV_PTR do_trap_store_misaligned 454 + RISCV_PTR do_trap_store_fault 455 + RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */ 456 + RISCV_PTR do_trap_ecall_s 457 + RISCV_PTR do_trap_unknown 458 + RISCV_PTR do_trap_ecall_m 459 + RISCV_PTR do_page_fault /* instruction page fault */ 460 + RISCV_PTR do_page_fault /* load page fault */ 461 + RISCV_PTR do_trap_unknown 462 + RISCV_PTR do_page_fault /* store page fault */ 463 + excp_vect_table_end: 464 + END(excp_vect_table)
+129
arch/riscv/kernel/process.c
··· 1 + /* 2 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 3 + * Chen Liqin <liqin.chen@sunplusct.com> 4 + * Lennox Wu <lennox.wu@sunplusct.com> 5 + * Copyright (C) 2012 Regents of the University of California 6 + * Copyright (C) 2017 SiFive 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, see the file COPYING, or write 20 + * to the Free Software Foundation, Inc., 21 + */ 22 + 23 + #include <linux/kernel.h> 24 + #include <linux/sched.h> 25 + #include <linux/sched/task_stack.h> 26 + #include <linux/tick.h> 27 + #include <linux/ptrace.h> 28 + 29 + #include <asm/unistd.h> 30 + #include <asm/uaccess.h> 31 + #include <asm/processor.h> 32 + #include <asm/csr.h> 33 + #include <asm/string.h> 34 + #include <asm/switch_to.h> 35 + 36 + extern asmlinkage void ret_from_fork(void); 37 + extern asmlinkage void ret_from_kernel_thread(void); 38 + 39 + void arch_cpu_idle(void) 40 + { 41 + wait_for_interrupt(); 42 + local_irq_enable(); 43 + } 44 + 45 + void show_regs(struct pt_regs *regs) 46 + { 47 + show_regs_print_info(KERN_DEFAULT); 48 + 49 + pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", 50 + regs->sepc, regs->ra, regs->sp); 51 + pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n", 52 + regs->gp, regs->tp, regs->t0); 53 + pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n", 54 + regs->t1, regs->t2, regs->s0); 55 + pr_cont(" s1 
: " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n", 56 + regs->s1, regs->a0, regs->a1); 57 + pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n", 58 + regs->a2, regs->a3, regs->a4); 59 + pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n", 60 + regs->a5, regs->a6, regs->a7); 61 + pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n", 62 + regs->s2, regs->s3, regs->s4); 63 + pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n", 64 + regs->s5, regs->s6, regs->s7); 65 + pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n", 66 + regs->s8, regs->s9, regs->s10); 67 + pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n", 68 + regs->s11, regs->t3, regs->t4); 69 + pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n", 70 + regs->t5, regs->t6); 71 + 72 + pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n", 73 + regs->sstatus, regs->sbadaddr, regs->scause); 74 + } 75 + 76 + void start_thread(struct pt_regs *regs, unsigned long pc, 77 + unsigned long sp) 78 + { 79 + regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL; 80 + regs->sepc = pc; 81 + regs->sp = sp; 82 + set_fs(USER_DS); 83 + } 84 + 85 + void flush_thread(void) 86 + { 87 + /* 88 + * Reset FPU context 89 + * frm: round to nearest, ties to even (IEEE default) 90 + * fflags: accrued exceptions cleared 91 + */ 92 + memset(&current->thread.fstate, 0, sizeof(current->thread.fstate)); 93 + } 94 + 95 + int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 96 + { 97 + fstate_save(src, task_pt_regs(src)); 98 + *dst = *src; 99 + return 0; 100 + } 101 + 102 + int copy_thread(unsigned long clone_flags, unsigned long usp, 103 + unsigned long arg, struct task_struct *p) 104 + { 105 + struct pt_regs *childregs = task_pt_regs(p); 106 + 107 + /* p->thread holds context to be restored by __switch_to() */ 108 + if (unlikely(p->flags & PF_KTHREAD)) { 109 + /* Kernel thread */ 110 + const register unsigned long gp __asm__ 
("gp"); 111 + memset(childregs, 0, sizeof(struct pt_regs)); 112 + childregs->gp = gp; 113 + childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */ 114 + 115 + p->thread.ra = (unsigned long)ret_from_kernel_thread; 116 + p->thread.s[0] = usp; /* fn */ 117 + p->thread.s[1] = arg; 118 + } else { 119 + *childregs = *(current_pt_regs()); 120 + if (usp) /* User fork */ 121 + childregs->sp = usp; 122 + if (clone_flags & CLONE_SETTLS) 123 + childregs->tp = childregs->a5; 124 + childregs->a0 = 0; /* Return value of fork() */ 125 + p->thread.ra = (unsigned long)ret_from_fork; 126 + } 127 + p->thread.sp = (unsigned long)childregs; /* kernel sp */ 128 + return 0; 129 + }