Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"The main items in this pull request are the Spectre variant 1.1 fixes
from Julien Thierry.

A few other patches to improve various areas, and removal of some
obsolete mcount bits and a redundant kbuild conditional"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8802/1: Call syscall_trace_exit even when system call skipped
ARM: 8797/1: spectre-v1.1: harden __copy_to_user
ARM: 8796/1: spectre-v1,v1.1: provide helpers for address sanitization
ARM: 8795/1: spectre-v1.1: use put_user() for __put_user()
ARM: 8794/1: uaccess: Prevent speculative use of the current addr_limit
ARM: 8793/1: signal: replace __put_user_error with __put_user
ARM: 8792/1: oabi-compat: copy oabi events using __copy_to_user()
ARM: 8791/1: vfp: use __copy_to_user() when saving VFP state
ARM: 8790/1: signal: always use __copy_to_user to save iwmmxt context
ARM: 8789/1: signal: copy registers using __copy_to_user()
ARM: 8801/1: makefile: use ARMv3M mode for RiscPC
ARM: 8800/1: use choice for kernel unwinders
ARM: 8798/1: remove unnecessary KBUILD_SRC ifeq conditional
ARM: 8788/1: ftrace: remove old mcount support
ARM: 8786/1: Debug kernel copy by printing

+202 -224
+26 -19
arch/arm/Kconfig.debug
··· 45 45 46 46 If in doubt, say "Y". 47 47 48 - # RMK wants arm kernels compiled with frame pointers or stack unwinding. 49 - # If you know what you are doing and are willing to live without stack 50 - # traces, you can get a slightly smaller kernel by setting this option to 51 - # n, but then RMK will have to kill you ;). 52 - config FRAME_POINTER 53 - bool 54 - depends on !THUMB2_KERNEL 55 - default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER 48 + choice 49 + prompt "Choose kernel unwinder" 50 + default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER 51 + default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER 56 52 help 57 - If you say N here, the resulting kernel will be slightly smaller and 58 - faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled, 59 - when a problem occurs with the kernel, the information that is 60 - reported is severely limited. 53 + This determines which method will be used for unwinding kernel stack 54 + traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack, 55 + livepatch, lockdep, and more. 61 56 62 - config ARM_UNWIND 63 - bool "Enable stack unwinding support (EXPERIMENTAL)" 57 + config UNWINDER_FRAME_POINTER 58 + bool "Frame pointer unwinder" 59 + depends on !THUMB2_KERNEL && !CC_IS_CLANG 60 + select ARCH_WANT_FRAME_POINTERS 61 + select FRAME_POINTER 62 + help 63 + This option enables the frame pointer unwinder for unwinding 64 + kernel stack traces. 65 + 66 + config UNWINDER_ARM 67 + bool "ARM EABI stack unwinder" 64 68 depends on AEABI 65 - default y 69 + select ARM_UNWIND 66 70 help 67 71 This option enables stack unwinding support in the kernel 68 72 using the information automatically generated by the 69 73 compiler. The resulting kernel image is slightly bigger but 70 74 the performance is not affected. Currently, this feature 71 - only works with EABI compilers. If unsure say Y. 75 + only works with EABI compilers. 
72 76 73 - config OLD_MCOUNT 77 + endchoice 78 + 79 + config ARM_UNWIND 74 80 bool 75 - depends on FUNCTION_TRACER && FRAME_POINTER 76 - default y 81 + 82 + config FRAME_POINTER 83 + bool 77 84 78 85 config DEBUG_USER 79 86 bool "Verbose user fault messages"
+1 -5
arch/arm/Makefile
··· 74 74 arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) 75 75 arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t 76 76 arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 77 - arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 77 + arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3m 78 78 79 79 # Evaluate arch cc-option calls now 80 80 arch-y := $(arch-y) ··· 264 264 265 265 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) 266 266 ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y) 267 - ifeq ($(KBUILD_SRC),) 268 - KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) 269 - else 270 267 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) 271 - endif 272 268 endif 273 269 endif 274 270
+43
arch/arm/boot/compressed/head.S
··· 114 114 #endif 115 115 .endm 116 116 117 + /* 118 + * Debug kernel copy by printing the memory addresses involved 119 + */ 120 + .macro dbgkc, begin, end, cbegin, cend 121 + #ifdef DEBUG 122 + kputc #'\n' 123 + kputc #'C' 124 + kputc #':' 125 + kputc #'0' 126 + kputc #'x' 127 + kphex \begin, 8 /* Start of compressed kernel */ 128 + kputc #'-' 129 + kputc #'0' 130 + kputc #'x' 131 + kphex \end, 8 /* End of compressed kernel */ 132 + kputc #'-' 133 + kputc #'>' 134 + kputc #'0' 135 + kputc #'x' 136 + kphex \cbegin, 8 /* Start of kernel copy */ 137 + kputc #'-' 138 + kputc #'0' 139 + kputc #'x' 140 + kphex \cend, 8 /* End of kernel copy */ 141 + kputc #'\n' 142 + kputc #'\r' 143 + #endif 144 + .endm 145 + 117 146 .section ".start", #alloc, #execinstr 118 147 /* 119 148 * sort out different calling conventions ··· 478 449 bic r9, r9, #31 @ ... of 32 bytes 479 450 add r6, r9, r5 480 451 add r9, r9, r10 452 + 453 + #ifdef DEBUG 454 + sub r10, r6, r5 455 + sub r10, r9, r10 456 + /* 457 + * We are about to copy the kernel to a new memory area. 458 + * The boundaries of the new memory area can be found in 459 + * r10 and r9, whilst r5 and r6 contain the boundaries 460 + * of the memory we are going to copy. 461 + * Calling dbgkc will help with the printing of this 462 + * information. 463 + */ 464 + dbgkc r5, r6, r10, r9 465 + #endif 481 466 482 467 1: ldmdb r6!, {r0 - r3, r10 - r12, lr} 483 468 cmp r6, r5
+11
arch/arm/include/asm/assembler.h
··· 467 467 #endif 468 468 .endm 469 469 470 + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req 471 + #ifdef CONFIG_CPU_SPECTRE 472 + sub \tmp, \limit, #1 473 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr 474 + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { 475 + subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } 476 + movlo \addr, #0 @ if (tmp < 0) addr = NULL 477 + csdb 478 + #endif 479 + .endm 480 + 470 481 .macro uaccess_disable, tmp, isb=1 471 482 #ifdef CONFIG_CPU_SW_DOMAIN_PAN 472 483 /*
-3
arch/arm/include/asm/ftrace.h
··· 16 16 17 17 #ifdef CONFIG_DYNAMIC_FTRACE 18 18 struct dyn_arch_ftrace { 19 - #ifdef CONFIG_OLD_MCOUNT 20 - bool old_mcount; 21 - #endif 22 19 }; 23 20 24 21 static inline unsigned long ftrace_call_adjust(unsigned long addr)
+2 -2
arch/arm/include/asm/thread_info.h
··· 121 121 struct user_vfp; 122 122 struct user_vfp_exc; 123 123 124 - extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, 125 - struct user_vfp_exc __user *); 124 + extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, 125 + struct user_vfp_exc *); 126 126 extern int vfp_restore_user_hwstate(struct user_vfp *, 127 127 struct user_vfp_exc *); 128 128 #endif
+43 -6
arch/arm/include/asm/uaccess.h
··· 69 69 static inline void set_fs(mm_segment_t fs) 70 70 { 71 71 current_thread_info()->addr_limit = fs; 72 + 73 + /* 74 + * Prevent a mispredicted conditional call to set_fs from forwarding 75 + * the wrong address limit to access_ok under speculation. 76 + */ 77 + dsb(nsh); 78 + isb(); 79 + 72 80 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); 73 81 } 74 82 ··· 98 90 */ 99 91 #define __inttype(x) \ 100 92 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) 93 + 94 + /* 95 + * Sanitise a uaccess pointer such that it becomes NULL if addr+size 96 + * is above the current addr_limit. 97 + */ 98 + #define uaccess_mask_range_ptr(ptr, size) \ 99 + ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size)) 100 + static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, 101 + size_t size) 102 + { 103 + void __user *safe_ptr = (void __user *)ptr; 104 + unsigned long tmp; 105 + 106 + asm volatile( 107 + " sub %1, %3, #1\n" 108 + " subs %1, %1, %0\n" 109 + " addhs %1, %1, #1\n" 110 + " subhss %1, %1, %2\n" 111 + " movlo %0, #0\n" 112 + : "+r" (safe_ptr), "=&r" (tmp) 113 + : "r" (size), "r" (current_thread_info()->addr_limit) 114 + : "cc"); 115 + 116 + csdb(); 117 + return safe_ptr; 118 + } 101 119 102 120 /* 103 121 * Single-value transfer routines. They automatically use the right ··· 396 362 __pu_err; \ 397 363 }) 398 364 365 + #ifdef CONFIG_CPU_SPECTRE 366 + /* 367 + * When mitigating Spectre variant 1.1, all accessors need to include 368 + * verification of the address space. 
369 + */ 370 + #define __put_user(x, ptr) put_user(x, ptr) 371 + 372 + #else 399 373 #define __put_user(x, ptr) \ 400 374 ({ \ 401 375 long __pu_err = 0; \ 402 376 __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \ 403 377 __pu_err; \ 404 - }) 405 - 406 - #define __put_user_error(x, ptr, err) \ 407 - ({ \ 408 - __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ 409 - (void) 0; \ 410 378 }) 411 379 412 380 #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ ··· 490 454 : "r" (x), "i" (-EFAULT) \ 491 455 : "cc") 492 456 457 + #endif /* !CONFIG_CPU_SPECTRE */ 493 458 494 459 #ifdef CONFIG_MMU 495 460 extern unsigned long __must_check
-3
arch/arm/kernel/armksyms.c
··· 167 167 #endif 168 168 169 169 #ifdef CONFIG_FUNCTION_TRACER 170 - #ifdef CONFIG_OLD_MCOUNT 171 - EXPORT_SYMBOL(mcount); 172 - #endif 173 170 EXPORT_SYMBOL(__gnu_mcount_nc); 174 171 #endif 175 172
+4 -5
arch/arm/kernel/entry-common.S
··· 296 296 cmp scno, #-1 @ skip the syscall? 297 297 bne 2b 298 298 add sp, sp, #S_OFF @ restore stack 299 - b ret_slow_syscall 300 299 301 - __sys_trace_return: 302 - str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 300 + __sys_trace_return_nosave: 301 + enable_irq_notrace 303 302 mov r0, sp 304 303 bl syscall_trace_exit 305 304 b ret_slow_syscall 306 305 307 - __sys_trace_return_nosave: 308 - enable_irq_notrace 306 + __sys_trace_return: 307 + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 309 308 mov r0, sp 310 309 bl syscall_trace_exit 311 310 b ret_slow_syscall
+4 -71
arch/arm/kernel/entry-ftrace.S
··· 15 15 * start of every function. In mcount, apart from the function's address (in 16 16 * lr), we need to get hold of the function's caller's address. 17 17 * 18 - * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: 19 - * 20 - * bl mcount 21 - * 22 - * These versions have the limitation that in order for the mcount routine to 23 - * be able to determine the function's caller's address, an APCS-style frame 24 - * pointer (which is set up with something like the code below) is required. 25 - * 26 - * mov ip, sp 27 - * push {fp, ip, lr, pc} 28 - * sub fp, ip, #4 29 - * 30 - * With EABI, these frame pointers are not available unless -mapcs-frame is 31 - * specified, and if building as Thumb-2, not even then. 32 - * 33 - * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, 34 - * with call sites like: 18 + * Newer GCCs (4.4+) solve this problem by using a version of mcount with call 19 + * sites like: 35 20 * 36 21 * push {lr} 37 22 * bl __gnu_mcount_nc ··· 31 46 * allows it to be clobbered in subroutines and doesn't use it to hold 32 47 * parameters.) 33 48 * 34 - * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" 35 - * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see 36 - * arch/arm/kernel/ftrace.c). 49 + * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" 50 + * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). 37 51 */ 38 - 39 - #ifndef CONFIG_OLD_MCOUNT 40 - #if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) 41 - #error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. 
42 - #endif 43 - #endif 44 52 45 53 .macro mcount_adjust_addr rd, rn 46 54 bic \rd, \rn, #1 @ clear the Thumb bit if present ··· 186 208 bl prepare_ftrace_return 187 209 mcount_exit 188 210 .endm 189 - 190 - #ifdef CONFIG_OLD_MCOUNT 191 - /* 192 - * mcount 193 - */ 194 - 195 - .macro mcount_enter 196 - stmdb sp!, {r0-r3, lr} 197 - .endm 198 - 199 - .macro mcount_get_lr reg 200 - ldr \reg, [fp, #-4] 201 - .endm 202 - 203 - .macro mcount_exit 204 - ldr lr, [fp, #-4] 205 - ldmia sp!, {r0-r3, pc} 206 - .endm 207 - 208 - ENTRY(mcount) 209 - #ifdef CONFIG_DYNAMIC_FTRACE 210 - stmdb sp!, {lr} 211 - ldr lr, [fp, #-4] 212 - ldmia sp!, {pc} 213 - #else 214 - __mcount _old 215 - #endif 216 - ENDPROC(mcount) 217 - 218 - #ifdef CONFIG_DYNAMIC_FTRACE 219 - ENTRY(ftrace_caller_old) 220 - __ftrace_caller _old 221 - ENDPROC(ftrace_caller_old) 222 - #endif 223 - 224 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 225 - ENTRY(ftrace_graph_caller_old) 226 - __ftrace_graph_caller 227 - ENDPROC(ftrace_graph_caller_old) 228 - #endif 229 - 230 - .purgem mcount_enter 231 - .purgem mcount_get_lr 232 - .purgem mcount_exit 233 - #endif 234 211 235 212 /* 236 213 * __gnu_mcount_nc
-51
arch/arm/kernel/ftrace.c
··· 47 47 stop_machine(__ftrace_modify_code, &command, NULL); 48 48 } 49 49 50 - #ifdef CONFIG_OLD_MCOUNT 51 - #define OLD_MCOUNT_ADDR ((unsigned long) mcount) 52 - #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) 53 - 54 - #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ 55 - 56 - static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 57 - { 58 - return rec->arch.old_mcount ? OLD_NOP : NOP; 59 - } 60 - 61 - static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) 62 - { 63 - if (!rec->arch.old_mcount) 64 - return addr; 65 - 66 - if (addr == MCOUNT_ADDR) 67 - addr = OLD_MCOUNT_ADDR; 68 - else if (addr == FTRACE_ADDR) 69 - addr = OLD_FTRACE_ADDR; 70 - 71 - return addr; 72 - } 73 - #else 74 50 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 75 51 { 76 52 return NOP; ··· 56 80 { 57 81 return addr; 58 82 } 59 - #endif 60 83 61 84 int ftrace_arch_code_modify_prepare(void) 62 85 { ··· 125 150 } 126 151 #endif 127 152 128 - #ifdef CONFIG_OLD_MCOUNT 129 - if (!ret) { 130 - pc = (unsigned long)&ftrace_call_old; 131 - new = ftrace_call_replace(pc, (unsigned long)func); 132 - 133 - ret = ftrace_modify_code(pc, 0, new, false); 134 - } 135 - #endif 136 - 137 153 return ret; 138 154 } 139 155 ··· 168 202 old = ftrace_call_replace(ip, adjust_address(rec, addr)); 169 203 new = ftrace_nop_replace(rec); 170 204 ret = ftrace_modify_code(ip, old, new, true); 171 - 172 - #ifdef CONFIG_OLD_MCOUNT 173 - if (ret == -EINVAL && addr == MCOUNT_ADDR) { 174 - rec->arch.old_mcount = true; 175 - 176 - old = ftrace_call_replace(ip, adjust_address(rec, addr)); 177 - new = ftrace_nop_replace(rec); 178 - ret = ftrace_modify_code(ip, old, new, true); 179 - } 180 - #endif 181 205 182 206 return ret; 183 207 } ··· 245 289 enable); 246 290 #endif 247 291 248 - 249 - #ifdef CONFIG_OLD_MCOUNT 250 - if (!ret) 251 - ret = __ftrace_modify_caller(&ftrace_graph_call_old, 252 - ftrace_graph_caller_old, 253 - enable); 254 - #endif 255 292 256 293 return ret; 
257 294 }
+43 -35
arch/arm/kernel/signal.c
··· 77 77 kframe->magic = IWMMXT_MAGIC; 78 78 kframe->size = IWMMXT_STORAGE_SIZE; 79 79 iwmmxt_task_copy(current_thread_info(), &kframe->storage); 80 - 81 - err = __copy_to_user(frame, kframe, sizeof(*frame)); 82 80 } else { 83 81 /* 84 82 * For bug-compatibility with older kernels, some space ··· 84 86 * Set the magic and size appropriately so that properly 85 87 * written userspace can skip it reliably: 86 88 */ 87 - __put_user_error(DUMMY_MAGIC, &frame->magic, err); 88 - __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err); 89 + *kframe = (struct iwmmxt_sigframe) { 90 + .magic = DUMMY_MAGIC, 91 + .size = IWMMXT_STORAGE_SIZE, 92 + }; 89 93 } 94 + 95 + err = __copy_to_user(frame, kframe, sizeof(*kframe)); 90 96 91 97 return err; 92 98 } ··· 137 135 138 136 static int preserve_vfp_context(struct vfp_sigframe __user *frame) 139 137 { 140 - const unsigned long magic = VFP_MAGIC; 141 - const unsigned long size = VFP_STORAGE_SIZE; 138 + struct vfp_sigframe kframe; 142 139 int err = 0; 143 140 144 - __put_user_error(magic, &frame->magic, err); 145 - __put_user_error(size, &frame->size, err); 141 + memset(&kframe, 0, sizeof(kframe)); 142 + kframe.magic = VFP_MAGIC; 143 + kframe.size = VFP_STORAGE_SIZE; 146 144 145 + err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); 147 146 if (err) 148 - return -EFAULT; 147 + return err; 149 148 150 - return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); 149 + return __copy_to_user(frame, &kframe, sizeof(kframe)); 151 150 } 152 151 153 152 static int restore_vfp_context(char __user **auxp) ··· 291 288 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) 292 289 { 293 290 struct aux_sigframe __user *aux; 291 + struct sigcontext context; 294 292 int err = 0; 295 293 296 - __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); 297 - __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); 298 - __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); 
299 - __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); 300 - __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); 301 - __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); 302 - __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); 303 - __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); 304 - __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); 305 - __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); 306 - __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); 307 - __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); 308 - __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); 309 - __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); 310 - __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); 311 - __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); 312 - __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); 294 + context = (struct sigcontext) { 295 + .arm_r0 = regs->ARM_r0, 296 + .arm_r1 = regs->ARM_r1, 297 + .arm_r2 = regs->ARM_r2, 298 + .arm_r3 = regs->ARM_r3, 299 + .arm_r4 = regs->ARM_r4, 300 + .arm_r5 = regs->ARM_r5, 301 + .arm_r6 = regs->ARM_r6, 302 + .arm_r7 = regs->ARM_r7, 303 + .arm_r8 = regs->ARM_r8, 304 + .arm_r9 = regs->ARM_r9, 305 + .arm_r10 = regs->ARM_r10, 306 + .arm_fp = regs->ARM_fp, 307 + .arm_ip = regs->ARM_ip, 308 + .arm_sp = regs->ARM_sp, 309 + .arm_lr = regs->ARM_lr, 310 + .arm_pc = regs->ARM_pc, 311 + .arm_cpsr = regs->ARM_cpsr, 313 312 314 - __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); 315 - __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); 316 - __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); 317 - __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); 313 + .trap_no = current->thread.trap_no, 314 + .error_code = current->thread.error_code, 315 + 
.fault_address = current->thread.address, 316 + .oldmask = set->sig[0], 317 + }; 318 + 319 + err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); 318 320 319 321 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); 320 322 ··· 336 328 if (err == 0) 337 329 err |= preserve_vfp_context(&aux->vfp); 338 330 #endif 339 - __put_user_error(0, &aux->end_magic, err); 331 + err |= __put_user(0, &aux->end_magic); 340 332 341 333 return err; 342 334 } ··· 499 491 /* 500 492 * Set uc.uc_flags to a value which sc.trap_no would never have. 501 493 */ 502 - __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); 494 + err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); 503 495 504 496 err |= setup_sigframe(frame, regs, set); 505 497 if (err == 0) ··· 519 511 520 512 err |= copy_siginfo_to_user(&frame->info, &ksig->info); 521 513 522 - __put_user_error(0, &frame->sig.uc.uc_flags, err); 523 - __put_user_error(NULL, &frame->sig.uc.uc_link, err); 514 + err |= __put_user(0, &frame->sig.uc.uc_flags); 515 + err |= __put_user(NULL, &frame->sig.uc.uc_link); 524 516 525 517 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); 526 518 err |= setup_sigframe(&frame->sig, regs, set);
+6 -2
arch/arm/kernel/sys_oabi-compat.c
··· 277 277 int maxevents, int timeout) 278 278 { 279 279 struct epoll_event *kbuf; 280 + struct oabi_epoll_event e; 280 281 mm_segment_t fs; 281 282 long ret, err, i; 282 283 ··· 296 295 set_fs(fs); 297 296 err = 0; 298 297 for (i = 0; i < ret; i++) { 299 - __put_user_error(kbuf[i].events, &events->events, err); 300 - __put_user_error(kbuf[i].data, &events->data, err); 298 + e.events = kbuf[i].events; 299 + e.data = kbuf[i].data; 300 + err = __copy_to_user(events, &e, sizeof(e)); 301 + if (err) 302 + break; 301 303 events++; 302 304 } 303 305 kfree(kbuf);
+1 -5
arch/arm/lib/copy_from_user.S
··· 93 93 #ifdef CONFIG_CPU_SPECTRE 94 94 get_thread_info r3 95 95 ldr r3, [r3, #TI_ADDR_LIMIT] 96 - adds ip, r1, r2 @ ip=addr+size 97 - sub r3, r3, #1 @ addr_limit - 1 98 - cmpcc ip, r3 @ if (addr+size > addr_limit - 1) 99 - movcs r1, #0 @ addr = NULL 100 - csdb 96 + uaccess_mask_range_ptr r1, r2, r3, ip 101 97 #endif 102 98 103 99 #include "copy_template.S"
+5 -1
arch/arm/lib/copy_to_user.S
··· 94 94 95 95 ENTRY(__copy_to_user_std) 96 96 WEAK(arm_copy_to_user) 97 + #ifdef CONFIG_CPU_SPECTRE 98 + get_thread_info r3 99 + ldr r3, [r3, #TI_ADDR_LIMIT] 100 + uaccess_mask_range_ptr r0, r2, r3, ip 101 + #endif 97 102 98 103 #include "copy_template.S" 99 104 ··· 113 108 rsb r0, r0, r2 114 109 copy_abort_end 115 110 .popsection 116 -
+2 -1
arch/arm/lib/uaccess_with_memcpy.c
··· 152 152 n = __copy_to_user_std(to, from, n); 153 153 uaccess_restore(ua_flags); 154 154 } else { 155 - n = __copy_to_user_memcpy(to, from, n); 155 + n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n), 156 + from, n); 156 157 } 157 158 return n; 158 159 }
+8 -12
arch/arm/vfp/vfpmodule.c
··· 553 553 * Save the current VFP state into the provided structures and prepare 554 554 * for entry into a new function (signal handler). 555 555 */ 556 - int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, 557 - struct user_vfp_exc __user *ufp_exc) 556 + int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, 557 + struct user_vfp_exc *ufp_exc) 558 558 { 559 559 struct thread_info *thread = current_thread_info(); 560 560 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; 561 - int err = 0; 562 561 563 562 /* Ensure that the saved hwstate is up-to-date. */ 564 563 vfp_sync_hwstate(thread); ··· 566 567 * Copy the floating point registers. There can be unused 567 568 * registers see asm/hwcap.h for details. 568 569 */ 569 - err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, 570 - sizeof(hwstate->fpregs)); 570 + memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); 571 + 571 572 /* 572 573 * Copy the status and control register. 573 574 */ 574 - __put_user_error(hwstate->fpscr, &ufp->fpscr, err); 575 + ufp->fpscr = hwstate->fpscr; 575 576 576 577 /* 577 578 * Copy the exception registers. 578 579 */ 579 - __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); 580 + ufp_exc->fpexc = hwstate->fpexc; 580 - __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); 581 + ufp_exc->fpinst = hwstate->fpinst; 581 - __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); 582 + ufp_exc->fpinst2 = hwstate->fpinst2; 583 - 584 - if (err) 585 - return -EFAULT; 580 + 581 + 582 + 583 + 585 583 /* Ensure that VFP is disabled. */ 586 584 587 585 vfp_flush_hwstate(thread);
+3 -3
lib/Kconfig.debug
··· 1179 1179 bool 1180 1180 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT 1181 1181 select STACKTRACE 1182 - select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86 1182 + select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86 1183 1183 select KALLSYMS 1184 1184 select KALLSYMS_ALL 1185 1185 ··· 1590 1590 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1591 1591 depends on !X86_64 1592 1592 select STACKTRACE 1593 - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86 1593 + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 1594 1594 help 1595 1595 Provide stacktrace filter for fault-injection capabilities 1596 1596 ··· 1599 1599 depends on DEBUG_KERNEL 1600 1600 depends on STACKTRACE_SUPPORT 1601 1601 depends on PROC_FS 1602 - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86 1602 + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 1603 1603 select KALLSYMS 1604 1604 select KALLSYMS_ALL 1605 1605 select STACKTRACE