Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: __user annotations for __get/__put_user().

This adds in some more __user annotations. These weren't being
handled properly in some of the __get_user and __put_user paths,
so tidy those up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Authored and committed by Paul Mundt
e08f457c 7a302a96

+49 -34
+5 -7
arch/sh/kernel/process.c
··· 17 17 #include <linux/kexec.h> 18 18 #include <linux/kdebug.h> 19 19 #include <linux/tick.h> 20 + #include <linux/reboot.h> 20 21 #include <asm/uaccess.h> 21 22 #include <asm/mmu_context.h> 22 23 #include <asm/pgalloc.h> ··· 450 449 /* 451 450 * sys_execve() executes a new program. 452 451 */ 453 - asmlinkage int sys_execve(char *ufilename, char **uargv, 454 - char **uenvp, unsigned long r7, 452 + asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, 453 + char __user * __user *uenvp, unsigned long r7, 455 454 struct pt_regs __regs) 456 455 { 457 456 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 458 457 int error; 459 458 char *filename; 460 459 461 - filename = getname((char __user *)ufilename); 460 + filename = getname(ufilename); 462 461 error = PTR_ERR(filename); 463 462 if (IS_ERR(filename)) 464 463 goto out; 465 464 466 - error = do_execve(filename, 467 - (char __user * __user *)uargv, 468 - (char __user * __user *)uenvp, 469 - regs); 465 + error = do_execve(filename, uargv, uenvp, regs); 470 466 if (error == 0) { 471 467 task_lock(current); 472 468 current->ptrace &= ~PT_DTRACE;
+4 -4
arch/sh/kernel/ptrace.c
··· 99 99 ret = -EIO; 100 100 if (copied != sizeof(tmp)) 101 101 break; 102 - ret = put_user(tmp,(unsigned long *) data); 102 + ret = put_user(tmp,(unsigned long __user *) data); 103 103 break; 104 104 } 105 105 ··· 128 128 tmp = !!tsk_used_math(child); 129 129 else 130 130 tmp = 0; 131 - ret = put_user(tmp, (unsigned long *)data); 131 + ret = put_user(tmp, (unsigned long __user *)data); 132 132 break; 133 133 } 134 134 ··· 196 196 197 197 case PTRACE_SINGLESTEP: { /* set the trap flag. */ 198 198 long pc; 199 - struct pt_regs *dummy = NULL; 199 + struct pt_regs *regs = NULL; 200 200 201 201 ret = -EIO; 202 202 if (!valid_signal(data)) ··· 207 207 child->ptrace |= PT_DTRACE; 208 208 } 209 209 210 - pc = get_stack_long(child, (long)&dummy->pc); 210 + pc = get_stack_long(child, (long)&regs->pc); 211 211 212 212 /* Next scheduling will set up UBC */ 213 213 if (child->thread.ubc_pc == 0)
+2 -2
arch/sh/kernel/signal.c
··· 261 261 goto badframe; 262 262 /* It is more difficult to avoid calling this function than to 263 263 call it and ignore errors. */ 264 - do_sigaltstack(&st, NULL, regs->regs[15]); 264 + do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame); 265 265 266 266 return r0; 267 267 268 268 badframe: 269 269 force_sig(SIGSEGV, current); 270 270 return 0; 271 - } 271 + } 272 272 273 273 /* 274 274 * Set up a signal frame.
+1 -1
arch/sh/kernel/traps.c
··· 581 581 info.si_signo = SIGBUS; 582 582 info.si_errno = 0; 583 583 info.si_code = si_code; 584 - info.si_addr = (void *) address; 584 + info.si_addr = (void __user *)address; 585 585 force_sig_info(SIGBUS, &info, current); 586 586 } else { 587 587 if (regs->pc & 1)
+1
include/asm-sh/page.h
··· 60 60 61 61 extern unsigned long shm_align_mask; 62 62 extern unsigned long max_low_pfn, min_low_pfn; 63 + extern unsigned long memory_start, memory_end; 63 64 64 65 #ifdef CONFIG_MMU 65 66 extern void clear_page_slow(void *to);
-2
include/asm-sh/sections.h
··· 3 3 4 4 #include <asm-generic/sections.h> 5 5 6 - extern char _end[]; 7 - 8 6 #endif /* __ASM_SH_SECTIONS_H */ 9 7
+14
include/asm-sh/system.h
··· 8 8 9 9 #include <linux/irqflags.h> 10 10 #include <linux/compiler.h> 11 + #include <linux/linkage.h> 11 12 #include <asm/types.h> 12 13 #include <asm/ptrace.h> 14 + 15 + struct task_struct *__switch_to(struct task_struct *prev, 16 + struct task_struct *next); 13 17 14 18 /* 15 19 * switch_to() should switch tasks to task nr n, first ··· 274 270 #define HAVE_DISABLE_HLT 275 271 void disable_hlt(void); 276 272 void enable_hlt(void); 273 + 274 + void default_idle(void); 275 + 276 + asmlinkage void break_point_trap(void); 277 + asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5, 278 + unsigned long r6, unsigned long r7, 279 + struct pt_regs __regs); 280 + asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5, 281 + unsigned long r6, unsigned long r7, 282 + struct pt_regs __regs); 277 283 278 284 #define arch_align_stack(x) (x) 279 285
+22 -18
include/asm-sh/uaccess.h
··· 61 61 */ 62 62 static inline int __access_ok(unsigned long addr, unsigned long size) 63 63 { 64 - extern unsigned long memory_start, memory_end; 65 - 66 64 return ((addr >= memory_start) && ((addr + size) < memory_end)); 67 65 } 68 66 #else /* CONFIG_MMU */ ··· 74 76 * __access_ok: Check if address with size is OK or not. 75 77 * 76 78 * We do three checks: 77 - * (1) is it user space? 79 + * (1) is it user space? 78 80 * (2) addr + size --> carry? 79 81 * (3) addr + size >= 0x80000000 (PAGE_OFFSET) 80 82 * ··· 140 142 __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 141 143 142 144 struct __large_struct { unsigned long buf[100]; }; 143 - #define __m(x) (*(struct __large_struct *)(x)) 145 + #define __m(x) (*(struct __large_struct __user *)(x)) 144 146 145 147 #define __get_user_size(x,ptr,size,retval) \ 146 148 do { \ 147 149 retval = 0; \ 150 + __chk_user_ptr(ptr); \ 148 151 switch (size) { \ 149 152 case 1: \ 150 153 __get_user_asm(x, ptr, retval, "b"); \ ··· 174 175 #define __get_user_check(x,ptr,size) \ 175 176 ({ \ 176 177 long __gu_err, __gu_val; \ 178 + __chk_user_ptr(ptr); \ 177 179 switch (size) { \ 178 180 case 1: \ 179 181 __get_user_1(__gu_val, (ptr), __gu_err); \ ··· 300 300 #define __put_user_size(x,ptr,size,retval) \ 301 301 do { \ 302 302 retval = 0; \ 303 + __chk_user_ptr(ptr); \ 303 304 switch (size) { \ 304 305 case 1: \ 305 306 __put_user_asm(x, ptr, retval, "b"); \ ··· 329 328 #define __put_user_check(x,ptr,size) \ 330 329 ({ \ 331 330 long __pu_err = -EFAULT; \ 332 - __typeof__(*(ptr)) *__pu_addr = (ptr); \ 331 + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 333 332 \ 334 333 if (__access_ok((unsigned long)__pu_addr,size)) \ 335 334 __put_user_size((x),__pu_addr,(size),__pu_err); \ ··· 407 406 #endif 408 407 409 408 extern void __put_user_unknown(void); 410 - 409 + 411 410 /* Generic arbitrary sized copy. 
*/ 412 411 /* Return the number of bytes NOT copied */ 413 - extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); 412 + __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); 414 413 415 414 #define copy_to_user(to,from,n) ({ \ 416 415 void *__copy_to = (void *) (to); \ ··· 420 419 __copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ 421 420 } else __copy_res = __copy_size; \ 422 421 __copy_res; }) 423 - 424 - #define __copy_to_user(to,from,n) \ 425 - __copy_user((void *)(to), \ 426 - (void *)(from), n) 427 - 428 - #define __copy_to_user_inatomic __copy_to_user 429 - #define __copy_from_user_inatomic __copy_from_user 430 - 431 422 432 423 #define copy_from_user(to,from,n) ({ \ 433 424 void *__copy_to = (void *) (to); \ ··· 431 438 } else __copy_res = __copy_size; \ 432 439 __copy_res; }) 433 440 434 - #define __copy_from_user(to,from,n) \ 435 - __copy_user((void *)(to), \ 436 - (void *)(from), n) 441 + static __always_inline unsigned long 442 + __copy_from_user(void *to, const void __user *from, unsigned long n) 443 + { 444 + return __copy_user(to, (__force void *)from, n); 445 + } 446 + 447 + static __always_inline unsigned long __must_check 448 + __copy_to_user(void __user *to, const void *from, unsigned long n) 449 + { 450 + return __copy_user((__force void *)to, from, n); 451 + } 452 + 453 + #define __copy_to_user_inatomic __copy_to_user 454 + #define __copy_from_user_inatomic __copy_from_user 437 455 438 456 /* 439 457 * Clear the area and return remaining number of bytes