Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: add load/store exception handler

Memory attached to instruction bus of the xtensa CPU is only accessible
for a limited subset of opcodes. Other opcodes generate an exception
with the load/store error cause code. This property complicates use of
such systems. Provide a handler that recognizes and transparently fixes
such exceptions. The following opcodes are recognized when used outside
of FLIX bundles: l32i, l32i.n, l16ui, l16si, l8ui.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+136 -14
+12
arch/xtensa/Kconfig
··· 203 203 204 204 Say Y here to enable unaligned memory access in user space. 205 205 206 + config XTENSA_LOAD_STORE 207 + bool "Load/store exception handler for memory only readable with l32" 208 + help 209 + The Xtensa architecture only allows reading memory attached to its 210 + instruction bus with l32r and l32i instructions, all other 211 + instructions raise an exception with the LoadStoreErrorCause code. 212 + This makes it hard to use some configurations, e.g. store string 213 + literals in FLASH memory attached to the instruction bus. 214 + 215 + Say Y here to enable exception handler that allows transparent 216 + byte and 2-byte access to memory attached to instruction bus. 217 + 206 218 config HAVE_SMP 207 219 bool "System Supports SMP (MX)" 208 220 depends on XTENSA_VARIANT_CUSTOM
+5
arch/xtensa/include/asm/traps.h
··· 47 47 asmlinkage void fast_illegal_instruction_user(void); 48 48 asmlinkage void fast_syscall_user(void); 49 49 asmlinkage void fast_alloca(void); 50 + asmlinkage void fast_load_store(void); 50 51 asmlinkage void fast_unaligned(void); 51 52 asmlinkage void fast_second_level_miss(void); 52 53 asmlinkage void fast_store_prohibited(void); ··· 65 64 static inline void __init early_trap_init(void) 66 65 { 67 66 static struct exc_table init_exc_table __initdata = { 67 + #ifdef CONFIG_XTENSA_LOAD_STORE 68 + .fast_kernel_handler[EXCCAUSE_LOAD_STORE_ERROR] = 69 + fast_load_store, 70 + #endif 68 71 #ifdef CONFIG_MMU 69 72 .fast_kernel_handler[EXCCAUSE_DTLB_MISS] = 70 73 fast_second_level_miss,
+97 -12
arch/xtensa/kernel/align.S
··· 22 22 #include <asm/asmmacro.h> 23 23 #include <asm/processor.h> 24 24 25 - #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 25 + #if XCHAL_UNALIGNED_LOAD_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE 26 + #define LOAD_EXCEPTION_HANDLER 27 + #endif 28 + 29 + #if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER 30 + #define ANY_EXCEPTION_HANDLER 31 + #endif 32 + 33 + #if XCHAL_HAVE_WINDOWED 34 + #define UNALIGNED_USER_EXCEPTION 35 + #endif 26 36 27 37 /* First-level exception handler for unaligned exceptions. 28 38 * ··· 67 57 * LE mask 0 0 X X / shift left 68 58 * BE shift left / mask 0 0 X X 69 59 */ 70 - 71 - #if XCHAL_HAVE_WINDOWED 72 - #define UNALIGNED_USER_EXCEPTION 73 - #endif 74 60 75 61 #if XCHAL_HAVE_BE 76 62 ··· 109 103 * 110 104 * 23 0 111 105 * ----------------------------- 112 - * res 0000 0010 106 + * L8UI xxxx xxxx 0000 ssss tttt 0010 113 107 * L16UI xxxx xxxx 0001 ssss tttt 0010 114 108 * L32I xxxx xxxx 0010 ssss tttt 0010 115 109 * XXX 0011 ssss tttt 0010 ··· 134 128 135 129 #define OP0_L32I_N 0x8 /* load immediate narrow */ 136 130 #define OP0_S32I_N 0x9 /* store immediate narrow */ 131 + #define OP0_LSAI 0x2 /* load/store */ 137 132 #define OP1_SI_MASK 0x4 /* OP1 bit set for stores */ 138 133 #define OP1_SI_BIT 2 /* OP1 bit number for stores */ 139 134 135 + #define OP1_L8UI 0x0 140 136 #define OP1_L32I 0x2 141 137 #define OP1_L16UI 0x1 142 138 #define OP1_L16SI 0x9 ··· 163 155 */ 164 156 165 157 .literal_position 158 + #ifdef CONFIG_XTENSA_LOAD_STORE 159 + ENTRY(fast_load_store) 160 + 161 + call0 .Lsave_and_load_instruction 162 + 163 + /* Analyze the instruction (load or store?). */
164 + 165 + extui a0, a4, INSN_OP0, 4 # get insn.op0 nibble 166 + 167 + #if XCHAL_HAVE_DENSITY 168 + _beqi a0, OP0_L32I_N, 1f # L32I.N, jump 169 + #endif 170 + bnei a0, OP0_LSAI, .Linvalid_instruction 171 + /* 'store indicator bit' set, jump */ 172 + bbsi.l a4, OP1_SI_BIT + INSN_OP1, .Linvalid_instruction 173 + 174 + 1: 175 + movi a3, ~3 176 + and a3, a3, a8 # align memory address 177 + 178 + __ssa8 a8 179 + 180 + #ifdef CONFIG_MMU 181 + /* l32e can't be used here even when it's available. */ 182 + /* TODO access_ok(a3) could be used here */ 183 + j .Linvalid_instruction 184 + #endif 185 + l32i a5, a3, 0 186 + l32i a6, a3, 4 187 + __src_b a3, a5, a6 # a3 has the data word 188 + 189 + #if XCHAL_HAVE_DENSITY 190 + addi a7, a7, 2 # increment PC (assume 16-bit insn) 191 + _beqi a0, OP0_L32I_N, .Lload_w # l32i.n: jump 192 + addi a7, a7, 1 193 + #else 194 + addi a7, a7, 3 195 + #endif 196 + 197 + extui a5, a4, INSN_OP1, 4 198 + _beqi a5, OP1_L32I, .Lload_w 199 + bnei a5, OP1_L8UI, .Lload16 200 + extui a3, a3, 0, 8 201 + j .Lload_w 202 + 203 + ENDPROC(fast_load_store) 204 + #endif 205 + 206 + /* 207 + * Entry condition: 208 + * 209 + * a0: trashed, original value saved on stack (PT_AREG0) 210 + * a1: a1 211 + * a2: new stack pointer, original in DEPC 212 + * a3: a3 213 + * depc: a2, original value saved on stack (PT_DEPC) 214 + * excsave_1: dispatch table 215 + * 216 + * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 217 + * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 218 + */ 219 + 220 + #ifdef ANY_EXCEPTION_HANDLER 166 221 ENTRY(fast_unaligned) 222 + 223 + #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 167 224 168 225 call0 .Lsave_and_load_instruction 169 226 ··· 244 171 /* 'store indicator bit' not set, jump */ 245 172 _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload 246 173 174 + #endif 175 + #if XCHAL_UNALIGNED_STORE_EXCEPTION 176 + 247 177 /* Store: Jump to table entry to get the value in the source register.*/ 248 178
249 179 .Lstore:movi a5, .Lstore_table # table 250 180 extui a6, a4, INSN_T, 4 # get source register 251 181 addx8 a5, a6, a5 252 182 jx a5 # jump into table 183 + #endif 184 + #if XCHAL_UNALIGNED_LOAD_EXCEPTION 253 185 254 186 /* Load: Load memory address. */ 255 187 ··· 285 207 286 208 extui a5, a4, INSN_OP1, 4 287 209 _beqi a5, OP1_L32I, .Lload_w # l32i: jump 288 - 210 + #endif 211 + #ifdef LOAD_EXCEPTION_HANDLER 212 + .Lload16: 289 213 extui a3, a3, 0, 16 # extract lower 16 bits 290 214 _beqi a5, OP1_L16UI, .Lload_w 291 215 addi a5, a5, -OP1_L16SI ··· 327 247 mov a13, a3 ; _j .Lexit; .align 8 328 248 mov a14, a3 ; _j .Lexit; .align 8 329 249 mov a15, a3 ; _j .Lexit; .align 8 330 - 250 + #endif 251 + #if XCHAL_UNALIGNED_STORE_EXCEPTION 331 252 .Lstore_table: 332 253 l32i a3, a2, PT_AREG0; _j .Lstore_w; .align 8 333 254 mov a3, a1; _j .Lstore_w; .align 8 # fishy?? ··· 346 265 mov a3, a13 ; _j .Lstore_w; .align 8 347 266 mov a3, a14 ; _j .Lstore_w; .align 8 348 267 mov a3, a15 ; _j .Lstore_w; .align 8 268 + #endif 349 269 270 + #ifdef ANY_EXCEPTION_HANDLER 350 271 /* We cannot handle this exception. */ 351 272 352 273 .extern _kernel_exception ··· 377 294 378 295 2: movi a0, _user_exception 379 296 jx a0 297 + #endif 298 + #if XCHAL_UNALIGNED_STORE_EXCEPTION 380 299 381 300 # a7: instruction pointer, a4: instruction, a3: value 382 301 .Lstore_w: ··· 443 358 #else 444 359 s32i a6, a4, 4 445 360 #endif 446 - 361 + #endif 362 + #ifdef ANY_EXCEPTION_HANDLER 447 363 .Lexit: 448 364 #if XCHAL_HAVE_LOOPS 449 365 rsr a4, lend # check if we reached LEND ··· 539 453 __src_b a4, a4, a5 # a4 has the instruction 540 454 541 455 ret 542 - 456 + #endif 543 457 ENDPROC(fast_unaligned) 544 458 545 459 ENTRY(fast_unaligned_fixup) ··· 576 490 jx a0 577 491 578 492 ENDPROC(fast_unaligned_fixup) 579 - 580 - #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ 493 + #endif
+2 -1
arch/xtensa/kernel/setup.c
··· 245 245 { 246 246 /* Initialize basic exception handling if configuration may need it */ 247 247 248 - if (IS_ENABLED(CONFIG_KASAN)) 248 + if (IS_ENABLED(CONFIG_KASAN) || 249 + IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)) 249 250 early_trap_init(); 250 251 251 252 /* Initialize MMU. */
+20 -1
arch/xtensa/kernel/traps.c
··· 54 54 #if XTENSA_FAKE_NMI 55 55 static void do_nmi(struct pt_regs *regs); 56 56 #endif 57 + #ifdef CONFIG_XTENSA_LOAD_STORE 58 + static void do_load_store(struct pt_regs *regs); 59 + #endif 57 60 static void do_unaligned_user(struct pt_regs *regs); 58 61 static void do_multihit(struct pt_regs *regs); 59 62 #if XTENSA_HAVE_COPROCESSORS ··· 92 89 { EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }, 93 90 { EXCCAUSE_SYSTEM_CALL, 0, system_call }, 94 91 /* EXCCAUSE_INSTRUCTION_FETCH unhandled */ 95 - /* EXCCAUSE_LOAD_STORE_ERROR unhandled*/ 92 + #ifdef CONFIG_XTENSA_LOAD_STORE 93 + { EXCCAUSE_LOAD_STORE_ERROR, USER|KRNL, fast_load_store }, 94 + { EXCCAUSE_LOAD_STORE_ERROR, 0, do_load_store }, 95 + #endif 96 96 { EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, 97 97 #ifdef SUPPORT_WINDOWED 98 98 { EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, ··· 352 346 __die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL); 353 347 force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc); 354 348 } 349 + 350 + #ifdef CONFIG_XTENSA_LOAD_STORE 351 + static void do_load_store(struct pt_regs *regs) 352 + { 353 + __die_if_kernel("Unhandled load/store exception in kernel", 354 + regs, SIGKILL); 355 + 356 + pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n", 357 + regs->excvaddr, current->comm, 358 + task_pid_nr(current), regs->pc); 359 + force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr); 360 + } 361 + #endif 355 362 356 363 /* 357 364 * Handle unaligned memory accesses from user space. Kill task.