Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Exception handling and mm-fault

This patch adds exception handling code, cpuinfo and mm-fault code.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

Guo Ren 081860b9 9143a935

+1546
+326
arch/csky/abiv1/alignment.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 + 4 + #include <linux/kernel.h> 5 + #include <linux/uaccess.h> 6 + #include <linux/ptrace.h> 7 + 8 + static int align_enable = 1; 9 + static int align_count; 10 + 11 + static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx) 12 + { 13 + return rx == 15 ? regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx); 14 + } 15 + 16 + static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val) 17 + { 18 + if (rx == 15) 19 + regs->lr = val; 20 + else 21 + *((uint32_t *)&(regs->a0) - 2 + rx) = val; 22 + } 23 + 24 + /* 25 + * Get byte-value from addr and set it to *valp. 26 + * 27 + * Success: return 0 28 + * Failure: return 1 29 + */ 30 + static int ldb_asm(uint32_t addr, uint32_t *valp) 31 + { 32 + uint32_t val; 33 + int err; 34 + 35 + if (!access_ok(VERIFY_READ, (void *)addr, 1)) 36 + return 1; 37 + 38 + asm volatile ( 39 + "movi %0, 0\n" 40 + "1:\n" 41 + "ldb %1, (%2)\n" 42 + "br 3f\n" 43 + "2:\n" 44 + "movi %0, 1\n" 45 + "br 3f\n" 46 + ".section __ex_table,\"a\"\n" 47 + ".align 2\n" 48 + ".long 1b, 2b\n" 49 + ".previous\n" 50 + "3:\n" 51 + : "=&r"(err), "=r"(val) 52 + : "r" (addr) 53 + ); 54 + 55 + *valp = val; 56 + 57 + return err; 58 + } 59 + 60 + /* 61 + * Put byte-value to addr. 
62 + * 63 + * Success: return 0 64 + * Failure: return 1 65 + */ 66 + static int stb_asm(uint32_t addr, uint32_t val) 67 + { 68 + int err; 69 + 70 + if (!access_ok(VERIFY_WRITE, (void *)addr, 1)) 71 + return 1; 72 + 73 + asm volatile ( 74 + "movi %0, 0\n" 75 + "1:\n" 76 + "stb %1, (%2)\n" 77 + "br 3f\n" 78 + "2:\n" 79 + "movi %0, 1\n" 80 + "br 3f\n" 81 + ".section __ex_table,\"a\"\n" 82 + ".align 2\n" 83 + ".long 1b, 2b\n" 84 + ".previous\n" 85 + "3:\n" 86 + : "=&r"(err) 87 + : "r"(val), "r" (addr) 88 + ); 89 + 90 + return err; 91 + } 92 + 93 + /* 94 + * Get half-word from [rx + imm] 95 + * 96 + * Success: return 0 97 + * Failure: return 1 98 + */ 99 + static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) 100 + { 101 + uint32_t byte0, byte1; 102 + 103 + if (ldb_asm(addr, &byte0)) 104 + return 1; 105 + addr += 1; 106 + if (ldb_asm(addr, &byte1)) 107 + return 1; 108 + 109 + byte0 |= byte1 << 8; 110 + put_ptreg(regs, rz, byte0); 111 + 112 + return 0; 113 + } 114 + 115 + /* 116 + * Store half-word to [rx + imm] 117 + * 118 + * Success: return 0 119 + * Failure: return 1 120 + */ 121 + static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) 122 + { 123 + uint32_t byte0, byte1; 124 + 125 + byte0 = byte1 = get_ptreg(regs, rz); 126 + 127 + byte0 &= 0xff; 128 + 129 + if (stb_asm(addr, byte0)) 130 + return 1; 131 + 132 + addr += 1; 133 + byte1 = (byte1 >> 8) & 0xff; 134 + if (stb_asm(addr, byte1)) 135 + return 1; 136 + 137 + return 0; 138 + } 139 + 140 + /* 141 + * Get word from [rx + imm] 142 + * 143 + * Success: return 0 144 + * Failure: return 1 145 + */ 146 + static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) 147 + { 148 + uint32_t byte0, byte1, byte2, byte3; 149 + 150 + if (ldb_asm(addr, &byte0)) 151 + return 1; 152 + 153 + addr += 1; 154 + if (ldb_asm(addr, &byte1)) 155 + return 1; 156 + 157 + addr += 1; 158 + if (ldb_asm(addr, &byte2)) 159 + return 1; 160 + 161 + addr += 1; 162 + if (ldb_asm(addr, &byte3)) 163 + return 1; 164 + 
165 + byte0 |= byte1 << 8; 166 + byte0 |= byte2 << 16; 167 + byte0 |= byte3 << 24; 168 + 169 + put_ptreg(regs, rz, byte0); 170 + 171 + return 0; 172 + } 173 + 174 + /* 175 + * Store word to [rx + imm] 176 + * 177 + * Success: return 0 178 + * Failure: return 1 179 + */ 180 + static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) 181 + { 182 + uint32_t byte0, byte1, byte2, byte3; 183 + 184 + byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz); 185 + 186 + byte0 &= 0xff; 187 + 188 + if (stb_asm(addr, byte0)) 189 + return 1; 190 + 191 + addr += 1; 192 + byte1 = (byte1 >> 8) & 0xff; 193 + if (stb_asm(addr, byte1)) 194 + return 1; 195 + 196 + addr += 1; 197 + byte2 = (byte2 >> 16) & 0xff; 198 + if (stb_asm(addr, byte2)) 199 + return 1; 200 + 201 + addr += 1; 202 + byte3 = (byte3 >> 24) & 0xff; 203 + if (stb_asm(addr, byte3)) 204 + return 1; 205 + 206 + align_count++; 207 + 208 + return 0; 209 + } 210 + 211 + extern int fixup_exception(struct pt_regs *regs); 212 + 213 + #define OP_LDH 0xc000 214 + #define OP_STH 0xd000 215 + #define OP_LDW 0x8000 216 + #define OP_STW 0x9000 217 + 218 + void csky_alignment(struct pt_regs *regs) 219 + { 220 + int ret; 221 + uint16_t tmp; 222 + uint32_t opcode = 0; 223 + uint32_t rx = 0; 224 + uint32_t rz = 0; 225 + uint32_t imm = 0; 226 + uint32_t addr = 0; 227 + 228 + if (!user_mode(regs)) 229 + goto bad_area; 230 + 231 + ret = get_user(tmp, (uint16_t *)instruction_pointer(regs)); 232 + if (ret) { 233 + pr_err("%s get_user failed.\n", __func__); 234 + goto bad_area; 235 + } 236 + 237 + opcode = (uint32_t)tmp; 238 + 239 + rx = opcode & 0xf; 240 + imm = (opcode >> 4) & 0xf; 241 + rz = (opcode >> 8) & 0xf; 242 + opcode &= 0xf000; 243 + 244 + if (rx == 0 || rx == 1 || rz == 0 || rz == 1) 245 + goto bad_area; 246 + 247 + switch (opcode) { 248 + case OP_LDH: 249 + addr = get_ptreg(regs, rx) + (imm << 1); 250 + ret = ldh_c(regs, rz, addr); 251 + break; 252 + case OP_LDW: 253 + addr = get_ptreg(regs, rx) + (imm << 2); 254 + ret = 
ldw_c(regs, rz, addr); 255 + break; 256 + case OP_STH: 257 + addr = get_ptreg(regs, rx) + (imm << 1); 258 + ret = sth_c(regs, rz, addr); 259 + break; 260 + case OP_STW: 261 + addr = get_ptreg(regs, rx) + (imm << 2); 262 + ret = stw_c(regs, rz, addr); 263 + break; 264 + } 265 + 266 + if (ret) 267 + goto bad_area; 268 + 269 + regs->pc += 2; 270 + 271 + return; 272 + 273 + bad_area: 274 + if (!user_mode(regs)) { 275 + if (fixup_exception(regs)) 276 + return; 277 + 278 + bust_spinlocks(1); 279 + pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n", 280 + __func__, opcode, rz, rx, imm, addr); 281 + show_regs(regs); 282 + bust_spinlocks(0); 283 + do_exit(SIGKILL); 284 + } 285 + 286 + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current); 287 + } 288 + 289 + static struct ctl_table alignment_tbl[4] = { 290 + { 291 + .procname = "enable", 292 + .data = &align_enable, 293 + .maxlen = sizeof(align_enable), 294 + .mode = 0666, 295 + .proc_handler = &proc_dointvec 296 + }, 297 + { 298 + .procname = "count", 299 + .data = &align_count, 300 + .maxlen = sizeof(align_count), 301 + .mode = 0666, 302 + .proc_handler = &proc_dointvec 303 + }, 304 + {} 305 + }; 306 + 307 + static struct ctl_table sysctl_table[2] = { 308 + { 309 + .procname = "csky_alignment", 310 + .mode = 0555, 311 + .child = alignment_tbl}, 312 + {} 313 + }; 314 + 315 + static struct ctl_path sysctl_path[2] = { 316 + {.procname = "csky"}, 317 + {} 318 + }; 319 + 320 + static int __init csky_alignment_init(void) 321 + { 322 + register_sysctl_paths(sysctl_path, sysctl_table); 323 + return 0; 324 + } 325 + 326 + arch_initcall(csky_alignment_init);
+160
arch/csky/abiv1/inc/abi/entry.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_ENTRY_H
#define __ASM_CSKY_ENTRY_H

#include <asm/setup.h>
#include <abi/regdef.h>

/* Byte offsets of saved registers in the pt_regs trap frame. */
#define LSAVE_PC 8
#define LSAVE_PSR 12
#define LSAVE_A0 24
#define LSAVE_A1 28
#define LSAVE_A2 32
#define LSAVE_A3 36
#define LSAVE_A4 40
#define LSAVE_A5 44

/* abiv1 instructions are 2 bytes; EPC_KEEP leaves epc on the faulting insn. */
#define EPC_INCREASE 2
#define EPC_KEEP 0

/* Switch from the user stack to the kernel stack via ss0/ss1. */
.macro USPTOKSP
	mtcr sp, ss1
	mfcr sp, ss0
.endm

/* Switch from the kernel stack back to the user stack via ss0/ss1. */
.macro KSPTOUSP
	mtcr sp, ss0
	mfcr sp, ss1
.endm

/* Advance \rx (typically a copy of epc) past one instruction. */
.macro INCTRAP rx
	addi \rx, EPC_INCREASE
.endm

/*
 * Build the pt_regs trap frame on the kernel stack.
 * \epc_inc is added to the saved epc (EPC_INCREASE or EPC_KEEP).
 * r13 is staged in ss2 so it can be used as a scratch register.
 */
.macro SAVE_ALL epc_inc
	mtcr r13, ss2
	mfcr r13, epsr
	btsti r13, 31
	bt 1f
	USPTOKSP
1:
	subi sp, 32
	subi sp, 32
	subi sp, 16
	stw r13, (sp, 12)

	stw lr, (sp, 4)

	mfcr lr, epc
	movi r13, \epc_inc
	add lr, r13
	stw lr, (sp, 8)

	mfcr lr, ss1
	stw lr, (sp, 16)

	/* a0 is saved twice: once as orig_a0 for syscall restart. */
	stw a0, (sp, 20)
	stw a0, (sp, 24)
	stw a1, (sp, 28)
	stw a2, (sp, 32)
	stw a3, (sp, 36)

	addi sp, 32
	addi sp, 8
	mfcr r13, ss2
	stw r6, (sp)
	stw r7, (sp, 4)
	stw r8, (sp, 8)
	stw r9, (sp, 12)
	stw r10, (sp, 16)
	stw r11, (sp, 20)
	stw r12, (sp, 24)
	stw r13, (sp, 28)
	stw r14, (sp, 32)
	stw r1, (sp, 36)
	subi sp, 32
	subi sp, 8
.endm

/*
 * Tear down the trap frame and return with rte.
 * Interrupts are disabled first; the epsr bit-31 test decides whether
 * to switch back to the user stack before rte.
 */
.macro RESTORE_ALL
	psrclr ie
	ldw lr, (sp, 4)
	ldw a0, (sp, 8)
	mtcr a0, epc
	ldw a0, (sp, 12)
	mtcr a0, epsr
	btsti a0, 31
	ldw a0, (sp, 16)
	mtcr a0, ss1

	ldw a0, (sp, 24)
	ldw a1, (sp, 28)
	ldw a2, (sp, 32)
	ldw a3, (sp, 36)

	addi sp, 32
	addi sp, 8
	ldw r6, (sp)
	ldw r7, (sp, 4)
	ldw r8, (sp, 8)
	ldw r9, (sp, 12)
	ldw r10, (sp, 16)
	ldw r11, (sp, 20)
	ldw r12, (sp, 24)
	ldw r13, (sp, 28)
	ldw r14, (sp, 32)
	ldw r1, (sp, 36)
	addi sp, 32
	addi sp, 8

	bt 1f
	KSPTOUSP
1:
	rte
.endm

/* Callee-saved registers spilled across __switch_to. */
.macro SAVE_SWITCH_STACK
	subi sp, 32
	stm r8-r15, (sp)
.endm

.macro RESTORE_SWITCH_STACK
	ldm r8-r15, (sp)
	addi sp, 32
.endm

/* MMU registers operators. */
.macro RD_MIR rx
	cprcr \rx, cpcr0
.endm

.macro RD_MEH rx
	cprcr \rx, cpcr4
.endm

.macro RD_MCIR rx
	cprcr \rx, cpcr8
.endm

.macro RD_PGDR rx
	cprcr \rx, cpcr29
.endm

.macro WR_MEH rx
	cpwcr \rx, cpcr4
.endm

.macro WR_MCIR rx
	cpwcr \rx, cpcr8
.endm

/* Program the two fixed MSA windows (cpcr30/31) over physical RAM. */
.macro SETUP_MMU rx
	lrw \rx, PHYS_OFFSET | 0xe
	cpwcr \rx, cpcr30
	lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe
	cpwcr \rx, cpcr31
.endm

#endif /* __ASM_CSKY_ENTRY_H */
+156
arch/csky/abiv2/inc/abi/entry.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_ENTRY_H
#define __ASM_CSKY_ENTRY_H

#include <asm/setup.h>
#include <abi/regdef.h>

/* Byte offsets of saved registers in the pt_regs trap frame. */
#define LSAVE_PC 8
#define LSAVE_PSR 12
#define LSAVE_A0 24
#define LSAVE_A1 28
#define LSAVE_A2 32
#define LSAVE_A3 36

/* abiv2 instructions are 4 bytes; EPC_KEEP leaves epc on the faulting insn. */
#define EPC_INCREASE 4
#define EPC_KEEP 0

/* abiv2 has a hardware user-sp register, so the stack-switch macros are no-ops. */
#define KSPTOUSP
#define USPTOKSP

#define usp cr<14, 1>

/* Advance \rx (typically a copy of epc) past one instruction. */
.macro INCTRAP rx
	addi \rx, EPC_INCREASE
.endm

/*
 * Build the 152-byte pt_regs trap frame on the kernel stack.
 * \epc_inc is added to the saved epc (EPC_INCREASE or EPC_KEEP).
 * tls is used as a scratch register and saved first.
 */
.macro SAVE_ALL epc_inc
	subi sp, 152
	stw tls, (sp, 0)
	stw lr, (sp, 4)

	mfcr lr, epc
	movi tls, \epc_inc
	add lr, tls
	stw lr, (sp, 8)

	mfcr lr, epsr
	stw lr, (sp, 12)
	mfcr lr, usp
	stw lr, (sp, 16)

	/* a0 is saved twice: once as orig_a0 for syscall restart. */
	stw a0, (sp, 20)
	stw a0, (sp, 24)
	stw a1, (sp, 28)
	stw a2, (sp, 32)
	stw a3, (sp, 36)

	addi sp, 40
	stm r4-r13, (sp)

	addi sp, 40
	stm r16-r30, (sp)
#ifdef CONFIG_CPU_HAS_HILO
	mfhi lr
	stw lr, (sp, 60)
	mflo lr
	stw lr, (sp, 64)
#endif
	subi sp, 80
.endm

/* Tear down the trap frame built by SAVE_ALL and return with rte. */
.macro RESTORE_ALL
	psrclr ie
	ldw tls, (sp, 0)
	ldw lr, (sp, 4)
	ldw a0, (sp, 8)
	mtcr a0, epc
	ldw a0, (sp, 12)
	mtcr a0, epsr
	ldw a0, (sp, 16)
	mtcr a0, usp

#ifdef CONFIG_CPU_HAS_HILO
	ldw a0, (sp, 140)
	mthi a0
	ldw a0, (sp, 144)
	mtlo a0
#endif

	ldw a0, (sp, 24)
	ldw a1, (sp, 28)
	ldw a2, (sp, 32)
	ldw a3, (sp, 36)

	addi sp, 40
	ldm r4-r13, (sp)
	addi sp, 40
	ldm r16-r30, (sp)
	addi sp, 72
	rte
.endm

/* Callee-saved registers spilled across __switch_to. */
.macro SAVE_SWITCH_STACK
	subi sp, 64
	stm r4-r11, (sp)
	stw r15, (sp, 32)
	stw r16, (sp, 36)
	stw r17, (sp, 40)
	stw r26, (sp, 44)
	stw r27, (sp, 48)
	stw r28, (sp, 52)
	stw r29, (sp, 56)
	stw r30, (sp, 60)
.endm

.macro RESTORE_SWITCH_STACK
	ldm r4-r11, (sp)
	ldw r15, (sp, 32)
	ldw r16, (sp, 36)
	ldw r17, (sp, 40)
	ldw r26, (sp, 44)
	ldw r27, (sp, 48)
	ldw r28, (sp, 52)
	ldw r29, (sp, 56)
	ldw r30, (sp, 60)
	addi sp, 64
.endm

/* MMU registers operators. */
.macro RD_MIR rx
	mfcr \rx, cr<0, 15>
.endm

.macro RD_MEH rx
	mfcr \rx, cr<4, 15>
.endm

.macro RD_MCIR rx
	mfcr \rx, cr<8, 15>
.endm

.macro RD_PGDR rx
	mfcr \rx, cr<29, 15>
.endm

/* Kernel pgd base (abiv2 only). */
.macro RD_PGDR_K rx
	mfcr \rx, cr<28, 15>
.endm

.macro WR_MEH rx
	mtcr \rx, cr<4, 15>
.endm

.macro WR_MCIR rx
	mtcr \rx, cr<8, 15>
.endm

/* Program the two fixed MSA windows (cr<30,15>/cr<31,15>) over physical RAM. */
.macro SETUP_MMU rx
	lrw \rx, PHYS_OFFSET | 0xe
	mtcr \rx, cr<30, 15>
	lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe
	mtcr \rx, cr<31, 15>
.endm
#endif /* __ASM_CSKY_ENTRY_H */
+44
arch/csky/include/asm/traps.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_TRAPS_H
#define __ASM_CSKY_TRAPS_H

/* Hardware exception vector numbers. */
#define VEC_RESET 0
#define VEC_ALIGN 1
#define VEC_ACCESS 2
#define VEC_ZERODIV 3
#define VEC_ILLEGAL 4
#define VEC_PRIV 5
#define VEC_TRACE 6
#define VEC_BREAKPOINT 7
#define VEC_UNRECOVER 8
#define VEC_SOFTRESET 9
#define VEC_AUTOVEC 10
#define VEC_FAUTOVEC 11
#define VEC_HWACCEL 12

/* MMU TLB exception vectors. */
#define VEC_TLBMISS 14
#define VEC_TLBMODIFIED 15

/* Software trap vectors (trap 0 = syscall, trap 2 = cmpxchg, trap 3 = tls). */
#define VEC_TRAP0 16
#define VEC_TRAP1 17
#define VEC_TRAP2 18
#define VEC_TRAP3 19

#define VEC_TLBINVALIDL 20
#define VEC_TLBINVALIDS 21

#define VEC_PRFL 29
#define VEC_FPE 30

/* Vector table, installed into vbr by pre_trap_init(). */
extern void *vec_base[];

/* Install handler @func at vector slot @i. */
#define VEC_INIT(i, func) \
do { \
	vec_base[i] = (void *)func; \
} while (0)

/* Alignment-trap fixup entry point (arch/csky/abiv1/alignment.c). */
void csky_alignment(struct pt_regs *regs);

#endif /* __ASM_CSKY_TRAPS_H */
+4
arch/csky/include/asm/unistd.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/* csky uses the generic uapi syscall numbers unchanged. */
#include <uapi/asm/unistd.h>
+79
arch/csky/kernel/cpu-probe.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/of.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <abi/reg_ops.h>

/*
 * Print one CPU's /proc/cpuinfo section.  Runs ON the target CPU via
 * smp_call_function_single() because cr13 (product-id) and the feature
 * control registers are per-CPU.
 */
static void percpu_print(void *arg)
{
	struct seq_file *m = (struct seq_file *)arg;
	unsigned int cur, next, i;

	seq_printf(m, "processor : %d\n", smp_processor_id());
	seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME);

	/* read processor id, max is 100 */
	/* Successive reads of cr13 step through a bank of id words. */
	cur = mfcr("cr13");
	for (i = 0; i < 100; i++) {
		seq_printf(m, "product info[%d] : 0x%08x\n", i, cur);

		next = mfcr("cr13");

		/* some CPU only has one id reg */
		if (cur == next)
			break;

		cur = next;

		/* cpid index is 31-28, reset */
		/*
		 * NOTE(review): unbounded busy-wait until the read index
		 * wraps back to i — assumes hardware always converges.
		 */
		if (!(next >> 28)) {
			while ((mfcr("cr13") >> 28) != i);
			break;
		}
	}

	/* CPU feature regs, setup by bootloader or gdbinit */
	seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint());
	seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18"));
	seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2());
	seq_printf(m, "\n");
}

/* seq_file .show: emit every online CPU's section, then the arch version. */
static int c_show(struct seq_file *m, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, percpu_print, m, true);

#ifdef CSKY_ARCH_VERSION
	seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION);
	seq_printf(m, "\n");
#endif

	return 0;
}

/* Single-record iterator: everything is printed in one c_show() call. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v) {}

/* Hooked up by the generic /proc/cpuinfo code. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show,
};
+396
arch/csky/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>

/* PTE index field inside the faulting virtual address. */
#define PTE_INDX_MSK 0xffc
#define PTE_INDX_SHIFT 10
#define _PGDIR_SHIFT 22

/*
 * Fast TLB refill/update handler.  Walks pgd->pte for the address in
 * MEH using only a2/a3/r6 (stashed in ss2-ss4 so no stack is touched).
 * If the PTE has _PAGE_PRESENT|\val0 it sets valid/accessed plus
 * \val1/\val2 and returns with rte; otherwise it falls through to the
 * slow path labelled \name, which builds a full trap frame.
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr a3, ss2
	mtcr r6, ss3
	mtcr a2, ss4

	RD_PGDR r6
	RD_MEH a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	/* Kernel addresses (bit 31 set) use the kernel pgd. */
	btsti a3, 31
	bf 1f
	RD_PGDR_K r6
1:
#else
	bgeni a2, 31
	WR_MCIR a2
	bgeni a2, 25
	WR_MCIR a2
#endif
	/* Convert pgd base to an unmapped (kseg) virtual address. */
	bclri r6, 0
	lrw a2, PHYS_OFFSET
	subu r6, a2
	bseti r6, 31

	/* Index the pgd with the top address bits. */
	mov a2, a3
	lsri a2, _PGDIR_SHIFT
	lsli a2, 2
	addu r6, a2
	ldw r6, (r6)

	lrw a2, PHYS_OFFSET
	subu r6, a2
	bseti r6, 31

	/* Index the pte table and load the pte. */
	lsri a3, PTE_INDX_SHIFT
	lrw a2, PTE_INDX_MSK
	and a3, a2
	addu r6, a3
	ldw a3, (r6)

	/* Take the slow path unless present + required permission. */
	movi a2, (_PAGE_PRESENT | \val0)
	and a3, a2
	cmpne a3, a2
	bt \name

	/* First read/write the page, just update the flags */
	ldw a3, (r6)
	bgeni a2, PAGE_VALID_BIT
	bseti a2, PAGE_ACCESSED_BIT
	bseti a2, \val1
	bseti a2, \val2
	or a3, a2
	stw a3, (r6)

	/* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi a2, 0x22
	bseti a2, 6
	mtcr r6, cr22
	mtcr a2, cr17
	sync
#endif

	/* Fast path done: restore scratch registers and return. */
	mfcr a3, ss2
	mfcr r6, ss3
	mfcr a2, ss4
	rte
\name:
	/* Slow path: restore scratch registers, build a trap frame. */
	mfcr a3, ss2
	mfcr r6, ss3
	mfcr a2, ss4
	SAVE_ALL EPC_KEEP
.endm
/* Slow-path tail: call do_page_fault(regs, \is_write) and exit. */
.macro tlbop_end is_write
	RD_MEH a2
	psrset ee, ie
	mov a0, sp
	movi a1, \is_write
	jbsr do_page_fault
	movi r11_sig, 0 /* r11 = 0, Not a syscall. */
	jmpi ret_from_exception
.endm

.text

tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

/* System-call entry (trap 0).  syscallid/regs come from SAVE_ALL. */
ENTRY(csky_systemcall)
	SAVE_ALL EPC_INCREASE

	psrset ee, ie

	/* Stack frame for syscall, origin call set_esp0 */
	mov r12, sp

	bmaski r11, 13
	andn r12, r11
	bgeni r11, 9
	addi r11, 32
	addu r12, r11
	st sp, (r12, 0)

	lrw r11, __NR_syscalls
	cmphs syscallid, r11 /* Check nr of syscall */
	bt ret_from_exception

	lrw r13, sys_call_table
	ixw r13, syscallid
	ldw r11, (r13)
	cmpnei r11, 0
	bf ret_from_exception

	/* thread_info lives at the base of the kernel stack. */
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10
	ldw r8, (r9, TINFO_FLAGS)
	btsti r8, TIF_SYSCALL_TRACE
	bt 1f
#if defined(__CSKYABIV2__)
	/* abiv2 passes syscall args 5/6 on the stack. */
	subi sp, 8
	stw r5, (sp, 0x4)
	stw r4, (sp, 0x0)
	jsr r11 /* Do system call */
	addi sp, 8
#else
	jsr r11
#endif
	stw a0, (sp, LSAVE_A0) /* Save return value */
	jmpi ret_from_exception

1:
	/* Traced syscall: notify the tracer on entry and exit. */
	movi a0, 0 /* enter system call */
	mov a1, sp /* sp = pt_regs pointer */
	jbsr syscall_trace
	/* Prepare args before do system call */
	ldw a0, (sp, LSAVE_A0)
	ldw a1, (sp, LSAVE_A1)
	ldw a2, (sp, LSAVE_A2)
	ldw a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi sp, 8
	stw r5, (sp, 0x4)
	stw r4, (sp, 0x0)
#else
	ldw r6, (sp, LSAVE_A4)
	ldw r7, (sp, LSAVE_A5)
#endif
	jsr r11 /* Do system call */
#if defined(__CSKYABIV2__)
	addi sp, 8
#endif
	stw a0, (sp, LSAVE_A0) /* Save return value */

	movi a0, 1 /* leave system call */
	mov a1, sp /* sp = pt_regs pointer */
	jbsr syscall_trace

syscall_exit_work:
	/* PSR bit 31 set means we trapped from kernel mode. */
	ld syscallid, (sp, LSAVE_PSR)
	btsti syscallid, 31
	bt 2f

	jmpi resume_userspace

2: RESTORE_ALL

/* First schedule of a kernel thread: call r9(r8), then exit path. */
ENTRY(ret_from_kernel_thread)
	jbsr schedule_tail
	mov a0, r8
	jsr r9
	jbsr ret_from_exception

/* First schedule of a forked user task. */
ENTRY(ret_from_fork)
	jbsr schedule_tail
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10
	ldw r8, (r9, TINFO_FLAGS)
	movi r11_sig, 1
	btsti r8, TIF_SYSCALL_TRACE
	bf 3f
	movi a0, 1
	mov a1, sp /* sp = pt_regs pointer */
	jbsr syscall_trace
3:
	jbsr ret_from_exception

/* Common exception exit: handle pending work before returning to user. */
ret_from_exception:
	ld syscallid, (sp, LSAVE_PSR)
	btsti syscallid, 31
	bt 1f

	/*
	 * Load address of current->thread_info, Then get address of task_struct
	 * Get task_needreshed in task_struct
	 */
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10

resume_userspace:
	ldw r8, (r9, TINFO_FLAGS)
	andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei r8, 0
	bt exit_work
1: RESTORE_ALL

exit_work:
	mov a0, sp /* Stack address is arg[0] */
	jbsr set_esp0 /* Call C level */
	btsti r8, TIF_NEED_RESCHED
	bt work_resched
	/* If thread_info->flag is empty, RESTORE_ALL */
	cmpnei r8, 0
	bf 1b
	mov a1, sp
	mov a0, r8
	mov a2, r11_sig /* syscall? */
	btsti r8, TIF_SIGPENDING /* delivering a signal? */
	/* prevent further restarts(set r11 = 0) */
	clrt r11_sig
	jbsr do_notify_resume /* do signals */
	br resume_userspace

work_resched:
	lrw syscallid, ret_from_exception
	mov r15, syscallid /* Return address in link */
	jmpi schedule

ENTRY(sys_rt_sigreturn)
	movi r11_sig, 0
	jmpi do_rt_sigreturn

/* Generic trap entry: full frame, then trap_c() decodes the vector. */
ENTRY(csky_trap)
	SAVE_ALL EPC_KEEP
	psrset ee
	movi r11_sig, 0 /* r11 = 0, Not a syscall. */
	mov a0, sp /* Push Stack pointer arg */
	jbsr trap_c /* Call C-level trap handler */
	jmpi ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* increase epc for continue */
	mfcr a0, epc
	INCTRAP a0
	mtcr a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski a0, THREAD_SHIFT
	not a0
	subi sp, 1
	and a0, sp
	addi sp, 1

	/* get tls */
	ldw a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

/* Auto-vectored IRQ entry, with optional kernel preemption. */
ENTRY(csky_irq)
	SAVE_ALL EPC_KEEP
	psrset ee
	movi r11_sig, 0 /* r11 = 0, Not a syscall. */

#ifdef CONFIG_PREEMPT
	mov r9, sp /* Get current stack pointer */
	bmaski r10, THREAD_SHIFT
	andn r9, r10 /* Get thread_info */

	/*
	 * Get task_struct->stack.preempt_count for current,
	 * and increase 1.
	 */
	ldw r8, (r9, TINFO_PREEMPT)
	addi r8, 1
	stw r8, (r9, TINFO_PREEMPT)
#endif

	mov a0, sp
	jbsr csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi r8, 1
	stw r8, (r9, TINFO_PREEMPT)
	cmpnei r8, 0
	bt 2f
	ldw r8, (r9, TINFO_FLAGS)
	btsti r8, TIF_NEED_RESCHED
	bf 2f
1:
	jbsr preempt_schedule_irq /* irq en/disable is done inside */
	ldw r7, (r9, TINFO_FLAGS) /* get new tasks TI_FLAGS */
	btsti r7, TIF_NEED_RESCHED
	bt 1b /* go again */
#endif
2:
	jmpi ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
	lrw a3, TASK_THREAD
	addu a3, a0

	mfcr a2, psr /* Save PSR value */
	stw a2, (a3, THREAD_SR) /* Save PSR in task struct */
	bclri a2, 6 /* Disable interrupts */
	mtcr a2, psr

	SAVE_SWITCH_STACK

	stw sp, (a3, THREAD_KSP)

#ifdef CONFIG_CPU_HAS_HILO
	/* Save prev's DSP hi/lo/csr state. */
	lrw r10, THREAD_DSPHI
	add r10, a3
	mfhi r6
	mflo r7
	stw r6, (r10, 0) /* THREAD_DSPHI */
	stw r7, (r10, 4) /* THREAD_DSPLO */
	mfcr r6, cr14
	stw r6, (r10, 8) /* THREAD_DSPCSR */
#endif

	/* Set up next process to run */
	lrw a3, TASK_THREAD
	addu a3, a1

	ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */

#ifdef CONFIG_CPU_HAS_HILO
	/* Restore next's DSP hi/lo/csr state. */
	lrw r10, THREAD_DSPHI
	add r10, a3
	ldw r6, (r10, 8) /* THREAD_DSPCSR */
	mtcr r6, cr14
	ldw r6, (r10, 0) /* THREAD_DSPHI */
	ldw r7, (r10, 4) /* THREAD_DSPLO */
	mthi r6
	mtlo r7
#endif

	ldw a2, (a3, THREAD_SR) /* Set next PSR */
	mtcr a2, psr

#if defined(__CSKYABIV2__)
	/* abiv2 keeps tls in a register; reload it for next. */
	addi r7, a1, TASK_THREAD_INFO
	ldw tls, (r7, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)
+169
arch/csky/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/siginfo.h>

#include <asm/mmu_context.h>

#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

/* Defined in entry.S */
asmlinkage void csky_trap(void);

asmlinkage void csky_systemcall(void);
asmlinkage void csky_cmpxchg(void);
asmlinkage void csky_get_tls(void);
asmlinkage void csky_irq(void);

asmlinkage void csky_tlbinvalidl(void);
asmlinkage void csky_tlbinvalids(void);
asmlinkage void csky_tlbmodified(void);

/* Defined in head.S */
asmlinkage void _start_smp_secondary(void);

/*
 * Early vector setup: point vbr at vec_base and route every vector
 * to the generic csky_trap handler until trap_init() refines them.
 */
void __init pre_trap_init(void)
{
	int i;

	mtcr("vbr", vec_base);

	for (i = 1; i < 128; i++)
		VEC_INIT(i, csky_trap);
}

/* Install the real handlers for syscalls, TLB faults, IRQs and FPU. */
void __init trap_init(void)
{
	VEC_INIT(VEC_AUTOVEC, csky_irq);

	/* setup trap0 trap2 trap3 */
	VEC_INIT(VEC_TRAP0, csky_systemcall);
	VEC_INIT(VEC_TRAP2, csky_cmpxchg);
	VEC_INIT(VEC_TRAP3, csky_get_tls);

	/* setup MMU TLB exception */
	VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl);
	VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids);
	VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

#ifdef CONFIG_SMP
	/* Secondary CPUs boot with the MMU off, so use physical addresses. */
	mtcr("cr<28, 0>", virt_to_phys(vec_base));

	VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary));
#endif
}

/* Oops and kill the task if @regs shows we faulted in kernel mode. */
void die_if_kernel(char *str, struct pt_regs *regs, int nr)
{
	if (user_mode(regs))
		return;

	console_verbose();
	pr_err("%s: %08x\n", str, nr);
	show_regs(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	do_exit(SIGSEGV);
}

/* Bus-error (VEC_ACCESS) handler: fatal in kernel, SIGSEGV in user. */
void buserr(struct pt_regs *regs)
{
#ifdef CONFIG_CPU_CK810
	/*
	 * CK810 erratum workaround: ignore the first hit at a given pc
	 * and only act when the same pc faults twice in a row.
	 */
	static unsigned long prev_pc;

	if ((regs->pc == prev_pc) && prev_pc != 0) {
		prev_pc = 0;
	} else {
		prev_pc = regs->pc;
		return;
	}
#endif

	die_if_kernel("Kernel mode BUS error", regs, 0);

	pr_err("User mode Bus Error\n");
	show_regs(regs);

	current->thread.esp0 = (unsigned long) regs;
	force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current);
}

/* abiv1 userspace software-breakpoint instruction encoding. */
#define USR_BKPT 0x1464
/*
 * C-level generic trap dispatcher, called from csky_trap (entry.S).
 * The vector number is read from psr bits 23:16.
 */
asmlinkage void trap_c(struct pt_regs *regs)
{
	int sig;
	unsigned long vector;
	/*
	 * NOTE(review): info.si_code is assigned below but info is never
	 * delivered (send_sig() takes no siginfo) — confirm whether
	 * force_sig_fault() was intended for the SIGTRAP cases.
	 */
	siginfo_t info;

	vector = (mfcr("psr") >> 16) & 0xff;

	switch (vector) {
	case VEC_ZERODIV:
		sig = SIGFPE;
		break;
	/* ptrace */
	case VEC_TRACE:
		info.si_code = TRAP_TRACE;
		sig = SIGTRAP;
		break;
	case VEC_ILLEGAL:
#ifndef CONFIG_CPU_NO_USER_BKPT
		/* A USR_BKPT "illegal" insn is really a breakpoint. */
		if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
#endif
		{
			sig = SIGILL;
			break;
		}
		/* fallthrough: USR_BKPT is treated as a breakpoint */
	/* gdbserver breakpoint */
	case VEC_TRAP1:
	/* jtagserver breakpoint */
	case VEC_BREAKPOINT:
		info.si_code = TRAP_BRKPT;
		sig = SIGTRAP;
		break;
	case VEC_ACCESS:
		return buserr(regs);
#ifdef CONFIG_CPU_NEED_SOFTALIGN
	case VEC_ALIGN:
		return csky_alignment(regs);
#endif
#ifdef CONFIG_CPU_HAS_FPU
	case VEC_FPE:
		return fpu_fpe(regs);
	case VEC_PRIV:
		if (fpu_libc_helper(regs))
			return;
		/* fallthrough: not an FPU-emulation trap */
#endif
	default:
		sig = SIGSEGV;
		break;
	}
	send_sig(sig, current, 0);
}

/* Record the trap-time kernel stack pointer (used by exit_work path). */
asmlinkage void set_esp0(unsigned long ssp)
{
	current->thread.esp0 = ssp;
}
+212
arch/csky/mm/fault.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 + 4 + #include <linux/signal.h> 5 + #include <linux/module.h> 6 + #include <linux/sched.h> 7 + #include <linux/interrupt.h> 8 + #include <linux/kernel.h> 9 + #include <linux/errno.h> 10 + #include <linux/string.h> 11 + #include <linux/types.h> 12 + #include <linux/ptrace.h> 13 + #include <linux/mman.h> 14 + #include <linux/mm.h> 15 + #include <linux/smp.h> 16 + #include <linux/version.h> 17 + #include <linux/vt_kern.h> 18 + #include <linux/kernel.h> 19 + #include <linux/extable.h> 20 + #include <linux/uaccess.h> 21 + 22 + #include <asm/hardirq.h> 23 + #include <asm/mmu_context.h> 24 + #include <asm/traps.h> 25 + #include <asm/page.h> 26 + 27 + int fixup_exception(struct pt_regs *regs) 28 + { 29 + const struct exception_table_entry *fixup; 30 + 31 + fixup = search_exception_tables(instruction_pointer(regs)); 32 + if (fixup) { 33 + regs->pc = fixup->nextinsn; 34 + 35 + return 1; 36 + } 37 + 38 + return 0; 39 + } 40 + 41 + /* 42 + * This routine handles page faults. It determines the address, 43 + * and the problem, and then passes it off to one of the appropriate 44 + * routines. 45 + */ 46 + asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, 47 + unsigned long mmu_meh) 48 + { 49 + struct vm_area_struct *vma = NULL; 50 + struct task_struct *tsk = current; 51 + struct mm_struct *mm = tsk->mm; 52 + int si_code; 53 + int fault; 54 + unsigned long address = mmu_meh & PAGE_MASK; 55 + 56 + si_code = SEGV_MAPERR; 57 + 58 + #ifndef CONFIG_CPU_HAS_TLBI 59 + /* 60 + * We fault-in kernel-space virtual memory on-demand. The 61 + * 'reference' page table is init_mm.pgd. 62 + * 63 + * NOTE! We MUST NOT take any locks for this case. We may 64 + * be in an interrupt or a critical region, and should 65 + * only copy the information from the master page table, 66 + * nothing more. 
67 + */ 68 + if (unlikely(address >= VMALLOC_START) && 69 + unlikely(address <= VMALLOC_END)) { 70 + /* 71 + * Synchronize this task's top level page-table 72 + * with the 'reference' page table. 73 + * 74 + * Do _not_ use "tsk" here. We might be inside 75 + * an interrupt in the middle of a task switch.. 76 + */ 77 + int offset = __pgd_offset(address); 78 + pgd_t *pgd, *pgd_k; 79 + pud_t *pud, *pud_k; 80 + pmd_t *pmd, *pmd_k; 81 + pte_t *pte_k; 82 + 83 + unsigned long pgd_base; 84 + 85 + pgd_base = tlb_get_pgd(); 86 + pgd = (pgd_t *)pgd_base + offset; 87 + pgd_k = init_mm.pgd + offset; 88 + 89 + if (!pgd_present(*pgd_k)) 90 + goto no_context; 91 + set_pgd(pgd, *pgd_k); 92 + 93 + pud = (pud_t *)pgd; 94 + pud_k = (pud_t *)pgd_k; 95 + if (!pud_present(*pud_k)) 96 + goto no_context; 97 + 98 + pmd = pmd_offset(pud, address); 99 + pmd_k = pmd_offset(pud_k, address); 100 + if (!pmd_present(*pmd_k)) 101 + goto no_context; 102 + set_pmd(pmd, *pmd_k); 103 + 104 + pte_k = pte_offset_kernel(pmd_k, address); 105 + if (!pte_present(*pte_k)) 106 + goto no_context; 107 + return; 108 + } 109 + #endif 110 + /* 111 + * If we're in an interrupt or have no user 112 + * context, we must not take the fault.. 113 + */ 114 + if (in_atomic() || !mm) 115 + goto bad_area_nosemaphore; 116 + 117 + down_read(&mm->mmap_sem); 118 + vma = find_vma(mm, address); 119 + if (!vma) 120 + goto bad_area; 121 + if (vma->vm_start <= address) 122 + goto good_area; 123 + if (!(vma->vm_flags & VM_GROWSDOWN)) 124 + goto bad_area; 125 + if (expand_stack(vma, address)) 126 + goto bad_area; 127 + /* 128 + * Ok, we have a good vm_area for this memory access, so 129 + * we can handle it.. 
130 + */ 131 + good_area: 132 + si_code = SEGV_ACCERR; 133 + 134 + if (write) { 135 + if (!(vma->vm_flags & VM_WRITE)) 136 + goto bad_area; 137 + } else { 138 + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) 139 + goto bad_area; 140 + } 141 + 142 + /* 143 + * If for any reason at all we couldn't handle the fault, 144 + * make sure we exit gracefully rather than endlessly redo 145 + * the fault. 146 + */ 147 + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0); 148 + if (unlikely(fault & VM_FAULT_ERROR)) { 149 + if (fault & VM_FAULT_OOM) 150 + goto out_of_memory; 151 + else if (fault & VM_FAULT_SIGBUS) 152 + goto do_sigbus; 153 + else if (fault & VM_FAULT_SIGSEGV) 154 + goto bad_area; 155 + BUG(); 156 + } 157 + if (fault & VM_FAULT_MAJOR) 158 + tsk->maj_flt++; 159 + else 160 + tsk->min_flt++; 161 + 162 + up_read(&mm->mmap_sem); 163 + return; 164 + 165 + /* 166 + * Something tried to access memory that isn't in our memory map.. 167 + * Fix it, but check if it's kernel or user first.. 168 + */ 169 + bad_area: 170 + up_read(&mm->mmap_sem); 171 + 172 + bad_area_nosemaphore: 173 + /* User mode accesses just cause a SIGSEGV */ 174 + if (user_mode(regs)) { 175 + tsk->thread.address = address; 176 + tsk->thread.error_code = write; 177 + force_sig_fault(SIGSEGV, si_code, (void __user *)address, current); 178 + return; 179 + } 180 + 181 + no_context: 182 + /* Are we prepared to handle this kernel fault? */ 183 + if (fixup_exception(regs)) 184 + return; 185 + 186 + /* 187 + * Oops. The kernel tried to access some bad page. We'll have to 188 + * terminate things with extreme prejudice. 189 + */ 190 + bust_spinlocks(1); 191 + pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n", 192 + __func__, address, regs->pc); 193 + die_if_kernel("Oops", regs, write); 194 + 195 + out_of_memory: 196 + /* 197 + * We ran out of memory, call the OOM killer, and return the userspace 198 + * (which will retry the fault, or kill us if we got oom-killed). 
199 + */ 200 + pagefault_out_of_memory(); 201 + return; 202 + 203 + do_sigbus: 204 + up_read(&mm->mmap_sem); 205 + 206 + /* Kernel mode? Handle exceptions or die */ 207 + if (!user_mode(regs)) 208 + goto no_context; 209 + 210 + tsk->thread.address = address; 211 + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); 212 + }