Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Remove 'type' argument from access_ok() function

Nobody has actually used the type (VERIFY_READ vs VERIFY_WRITE) argument
of the user address range verification function since we got rid of the
old racy i386-only code to walk page tables by hand.

It existed because the original 80386 would not honor the write protect
bit when in kernel mode, so you had to do COW by hand before doing any
user access. But we haven't supported that in a long time, and these
days the 'type' argument is a purely historical artifact.

A discussion about extending 'user_access_begin()' to do the range
checking resulted in this patch, because there is no way we're going to
move the old VERIFY_xyz interface to that model. And it's best done at
the end of the merge window when I've done most of my merges, so let's
just get this done once and for all.

This patch was mostly done with a sed-script, with manual fix-ups for
the cases that weren't of the trivial 'access_ok(VERIFY_xyz' form.

There were a couple of notable cases:

- csky still had the old "verify_area()" name as an alias.

- the iter_iov code had magical hardcoded knowledge of the actual
values of VERIFY_{READ,WRITE} (not that they mattered, since nothing
really used it)

- microblaze used the type argument for a debug printout

but other than those oddities this should be a total no-op patch.

I tried to fix up all architectures, did fairly extensive grepping for
access_ok() uses, and the changes are trivial, but I may have missed
something. Any missed conversion should be trivially fixable, though.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+610 -679
+1 -1
arch/alpha/include/asm/futex.h
··· 68 68 int ret = 0, cmp; 69 69 u32 prev; 70 70 71 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 71 + if (!access_ok(uaddr, sizeof(u32))) 72 72 return -EFAULT; 73 73 74 74 __asm__ __volatile__ (
+1 -1
arch/alpha/include/asm/uaccess.h
··· 36 36 #define __access_ok(addr, size) \ 37 37 ((get_fs().seg & (addr | size | (addr+size))) == 0) 38 38 39 - #define access_ok(type, addr, size) \ 39 + #define access_ok(addr, size) \ 40 40 ({ \ 41 41 __chk_user_ptr(addr); \ 42 42 __access_ok(((unsigned long)(addr)), (size)); \
+6 -6
arch/alpha/kernel/signal.c
··· 65 65 66 66 if (act) { 67 67 old_sigset_t mask; 68 - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 68 + if (!access_ok(act, sizeof(*act)) || 69 69 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 70 70 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 71 71 __get_user(mask, &act->sa_mask)) ··· 77 77 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 78 78 79 79 if (!ret && oact) { 80 - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 80 + if (!access_ok(oact, sizeof(*oact)) || 81 81 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 82 82 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 83 83 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) ··· 207 207 sigset_t set; 208 208 209 209 /* Verify that it's a good sigcontext before using it */ 210 - if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) 210 + if (!access_ok(sc, sizeof(*sc))) 211 211 goto give_sigsegv; 212 212 if (__get_user(set.sig[0], &sc->sc_mask)) 213 213 goto give_sigsegv; ··· 235 235 sigset_t set; 236 236 237 237 /* Verify that it's a good ucontext_t before using it */ 238 - if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc))) 238 + if (!access_ok(&frame->uc, sizeof(frame->uc))) 239 239 goto give_sigsegv; 240 240 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 241 241 goto give_sigsegv; ··· 332 332 333 333 oldsp = rdusp(); 334 334 frame = get_sigframe(ksig, oldsp, sizeof(*frame)); 335 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 335 + if (!access_ok(frame, sizeof(*frame))) 336 336 return -EFAULT; 337 337 338 338 err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp); ··· 377 377 378 378 oldsp = rdusp(); 379 379 frame = get_sigframe(ksig, oldsp, sizeof(*frame)); 380 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 380 + if (!access_ok(frame, sizeof(*frame))) 381 381 return -EFAULT; 382 382 383 383 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+1 -1
arch/alpha/lib/csum_partial_copy.c
··· 333 333 unsigned long doff = 7 & (unsigned long) dst; 334 334 335 335 if (len) { 336 - if (!access_ok(VERIFY_READ, src, len)) { 336 + if (!access_ok(src, len)) { 337 337 if (errp) *errp = -EFAULT; 338 338 memset(dst, 0, len); 339 339 return sum;
+1 -1
arch/arc/include/asm/futex.h
··· 126 126 int ret = 0; 127 127 u32 existval; 128 128 129 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 129 + if (!access_ok(uaddr, sizeof(u32))) 130 130 return -EFAULT; 131 131 132 132 #ifndef CONFIG_ARC_HAS_LLSC
+1 -1
arch/arc/kernel/process.c
··· 61 61 /* Z indicates to userspace if operation succeded */ 62 62 regs->status32 &= ~STATUS_Z_MASK; 63 63 64 - ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)); 64 + ret = access_ok(uaddr, sizeof(*uaddr)); 65 65 if (!ret) 66 66 goto fail; 67 67
+2 -2
arch/arc/kernel/signal.c
··· 169 169 170 170 sf = (struct rt_sigframe __force __user *)(regs->sp); 171 171 172 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) 172 + if (!access_ok(sf, sizeof(*sf))) 173 173 goto badframe; 174 174 175 175 if (__get_user(magic, &sf->sigret_magic)) ··· 219 219 frame = (void __user *)((sp - framesize) & ~7); 220 220 221 221 /* Check that we can actually write to the signal frame */ 222 - if (!access_ok(VERIFY_WRITE, frame, framesize)) 222 + if (!access_ok(frame, framesize)) 223 223 frame = NULL; 224 224 225 225 return frame;
+2 -2
arch/arm/include/asm/futex.h
··· 50 50 int ret; 51 51 u32 val; 52 52 53 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 53 + if (!access_ok(uaddr, sizeof(u32))) 54 54 return -EFAULT; 55 55 56 56 smp_mb(); ··· 104 104 int ret = 0; 105 105 u32 val; 106 106 107 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 107 + if (!access_ok(uaddr, sizeof(u32))) 108 108 return -EFAULT; 109 109 110 110 preempt_disable();
+2 -2
arch/arm/include/asm/uaccess.h
··· 279 279 280 280 #endif /* CONFIG_MMU */ 281 281 282 - #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) 282 + #define access_ok(addr, size) (__range_ok(addr, size) == 0) 283 283 284 284 #define user_addr_max() \ 285 285 (uaccess_kernel() ? ~0UL : get_fs()) ··· 560 560 561 561 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 562 562 { 563 - if (access_ok(VERIFY_WRITE, to, n)) 563 + if (access_ok(to, n)) 564 564 n = __clear_user(to, n); 565 565 return n; 566 566 }
+1 -1
arch/arm/kernel/perf_callchain.c
··· 37 37 struct frame_tail buftail; 38 38 unsigned long err; 39 39 40 - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) 40 + if (!access_ok(tail, sizeof(buftail))) 41 41 return NULL; 42 42 43 43 pagefault_disable();
+3 -3
arch/arm/kernel/signal.c
··· 241 241 242 242 frame = (struct sigframe __user *)regs->ARM_sp; 243 243 244 - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) 244 + if (!access_ok(frame, sizeof (*frame))) 245 245 goto badframe; 246 246 247 247 if (restore_sigframe(regs, frame)) ··· 271 271 272 272 frame = (struct rt_sigframe __user *)regs->ARM_sp; 273 273 274 - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) 274 + if (!access_ok(frame, sizeof (*frame))) 275 275 goto badframe; 276 276 277 277 if (restore_sigframe(regs, &frame->sig)) ··· 355 355 /* 356 356 * Check that we can actually write to the signal frame. 357 357 */ 358 - if (!access_ok(VERIFY_WRITE, frame, framesize)) 358 + if (!access_ok(frame, framesize)) 359 359 frame = NULL; 360 360 361 361 return frame;
+1 -1
arch/arm/kernel/swp_emulate.c
··· 198 198 destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); 199 199 200 200 /* Check access in reasonable access range for both SWP and SWPB */ 201 - if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { 201 + if (!access_ok((address & ~3), 4)) { 202 202 pr_debug("SWP{B} emulation: access to %p not allowed!\n", 203 203 (void *)address); 204 204 res = -EFAULT;
+2 -2
arch/arm/kernel/sys_oabi-compat.c
··· 285 285 maxevents > (INT_MAX/sizeof(*kbuf)) || 286 286 maxevents > (INT_MAX/sizeof(*events))) 287 287 return -EINVAL; 288 - if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) 288 + if (!access_ok(events, sizeof(*events) * maxevents)) 289 289 return -EFAULT; 290 290 kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL); 291 291 if (!kbuf) ··· 326 326 327 327 if (nsops < 1 || nsops > SEMOPM) 328 328 return -EINVAL; 329 - if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) 329 + if (!access_ok(tsops, sizeof(*tsops) * nsops)) 330 330 return -EFAULT; 331 331 sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); 332 332 if (!sops)
+1 -1
arch/arm/kernel/traps.c
··· 582 582 if (end < start || flags) 583 583 return -EINVAL; 584 584 585 - if (!access_ok(VERIFY_READ, start, end - start)) 585 + if (!access_ok(start, end - start)) 586 586 return -EFAULT; 587 587 588 588 return __do_cache_op(start, end);
+1 -1
arch/arm/oprofile/common.c
··· 88 88 struct frame_tail buftail[2]; 89 89 90 90 /* Also check accessibility of one struct frame_tail beyond */ 91 - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) 91 + if (!access_ok(tail, sizeof(buftail))) 92 92 return NULL; 93 93 if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) 94 94 return NULL;
+1 -1
arch/arm64/include/asm/futex.h
··· 96 96 u32 val, tmp; 97 97 u32 __user *uaddr; 98 98 99 - if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) 99 + if (!access_ok(_uaddr, sizeof(u32))) 100 100 return -EFAULT; 101 101 102 102 uaddr = __uaccess_mask_ptr(_uaddr);
+4 -4
arch/arm64/include/asm/uaccess.h
··· 95 95 return ret; 96 96 } 97 97 98 - #define access_ok(type, addr, size) __range_ok(addr, size) 98 + #define access_ok(addr, size) __range_ok(addr, size) 99 99 #define user_addr_max get_fs 100 100 101 101 #define _ASM_EXTABLE(from, to) \ ··· 301 301 ({ \ 302 302 __typeof__(*(ptr)) __user *__p = (ptr); \ 303 303 might_fault(); \ 304 - if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 304 + if (access_ok(__p, sizeof(*__p))) { \ 305 305 __p = uaccess_mask_ptr(__p); \ 306 306 __get_user_err((x), __p, (err)); \ 307 307 } else { \ ··· 370 370 ({ \ 371 371 __typeof__(*(ptr)) __user *__p = (ptr); \ 372 372 might_fault(); \ 373 - if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 373 + if (access_ok(__p, sizeof(*__p))) { \ 374 374 __p = uaccess_mask_ptr(__p); \ 375 375 __put_user_err((x), __p, (err)); \ 376 376 } else { \ ··· 418 418 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); 419 419 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) 420 420 { 421 - if (access_ok(VERIFY_WRITE, to, n)) 421 + if (access_ok(to, n)) 422 422 n = __arch_clear_user(__uaccess_mask_ptr(to), n); 423 423 return n; 424 424 }
+1 -1
arch/arm64/kernel/armv8_deprecated.c
··· 402 402 403 403 /* Check access in reasonable access range for both SWP and SWPB */ 404 404 user_ptr = (const void __user *)(unsigned long)(address & ~3); 405 - if (!access_ok(VERIFY_WRITE, user_ptr, 4)) { 405 + if (!access_ok(user_ptr, 4)) { 406 406 pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", 407 407 address); 408 408 goto fault;
+2 -2
arch/arm64/kernel/perf_callchain.c
··· 39 39 unsigned long lr; 40 40 41 41 /* Also check accessibility of one struct frame_tail beyond */ 42 - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) 42 + if (!access_ok(tail, sizeof(buftail))) 43 43 return NULL; 44 44 45 45 pagefault_disable(); ··· 86 86 unsigned long err; 87 87 88 88 /* Also check accessibility of one struct frame_tail beyond */ 89 - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) 89 + if (!access_ok(tail, sizeof(buftail))) 90 90 return NULL; 91 91 92 92 pagefault_disable();
+3 -3
arch/arm64/kernel/signal.c
··· 470 470 offset = 0; 471 471 limit = extra_size; 472 472 473 - if (!access_ok(VERIFY_READ, base, limit)) 473 + if (!access_ok(base, limit)) 474 474 goto invalid; 475 475 476 476 continue; ··· 556 556 557 557 frame = (struct rt_sigframe __user *)regs->sp; 558 558 559 - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) 559 + if (!access_ok(frame, sizeof (*frame))) 560 560 goto badframe; 561 561 562 562 if (restore_sigframe(regs, frame)) ··· 730 730 /* 731 731 * Check that we can actually write to the signal frame. 732 732 */ 733 - if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp)) 733 + if (!access_ok(user->sigframe, sp_top - sp)) 734 734 return -EFAULT; 735 735 736 736 return 0;
+3 -3
arch/arm64/kernel/signal32.c
··· 303 303 304 304 frame = (struct compat_sigframe __user *)regs->compat_sp; 305 305 306 - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) 306 + if (!access_ok(frame, sizeof (*frame))) 307 307 goto badframe; 308 308 309 309 if (compat_restore_sigframe(regs, frame)) ··· 334 334 335 335 frame = (struct compat_rt_sigframe __user *)regs->compat_sp; 336 336 337 - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) 337 + if (!access_ok(frame, sizeof (*frame))) 338 338 goto badframe; 339 339 340 340 if (compat_restore_sigframe(regs, &frame->sig)) ··· 365 365 /* 366 366 * Check that we can actually write to the signal frame. 367 367 */ 368 - if (!access_ok(VERIFY_WRITE, frame, framesize)) 368 + if (!access_ok(frame, framesize)) 369 369 frame = NULL; 370 370 371 371 return frame;
+1 -1
arch/arm64/kernel/sys_compat.c
··· 58 58 if (end < start || flags) 59 59 return -EINVAL; 60 60 61 - if (!access_ok(VERIFY_READ, (const void __user *)start, end - start)) 61 + if (!access_ok((const void __user *)start, end - start)) 62 62 return -EFAULT; 63 63 64 64 return __do_compat_cache_op(start, end);
+2 -2
arch/c6x/kernel/signal.c
··· 80 80 81 81 frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8); 82 82 83 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 83 + if (!access_ok(frame, sizeof(*frame))) 84 84 goto badframe; 85 85 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 86 86 goto badframe; ··· 149 149 150 150 frame = get_sigframe(ksig, regs, sizeof(*frame)); 151 151 152 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 152 + if (!access_ok(frame, sizeof(*frame))) 153 153 return -EFAULT; 154 154 155 155 err |= __put_user(&frame->info, &frame->pinfo);
+2 -2
arch/csky/abiv1/alignment.c
··· 32 32 uint32_t val; 33 33 int err; 34 34 35 - if (!access_ok(VERIFY_READ, (void *)addr, 1)) 35 + if (!access_ok((void *)addr, 1)) 36 36 return 1; 37 37 38 38 asm volatile ( ··· 67 67 { 68 68 int err; 69 69 70 - if (!access_ok(VERIFY_WRITE, (void *)addr, 1)) 70 + if (!access_ok((void *)addr, 1)) 71 71 return 1; 72 72 73 73 asm volatile (
+4 -12
arch/csky/include/asm/uaccess.h
··· 16 16 #include <linux/version.h> 17 17 #include <asm/segment.h> 18 18 19 - #define VERIFY_READ 0 20 - #define VERIFY_WRITE 1 21 - 22 - static inline int access_ok(int type, const void *addr, unsigned long size) 19 + static inline int access_ok(const void *addr, unsigned long size) 23 20 { 24 21 unsigned long limit = current_thread_info()->addr_limit.seg; 25 22 ··· 24 27 ((unsigned long)(addr + size) < limit)); 25 28 } 26 29 27 - static inline int verify_area(int type, const void *addr, unsigned long size) 28 - { 29 - return access_ok(type, addr, size) ? 0 : -EFAULT; 30 - } 31 - 32 - #define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0)) 30 + #define __addr_ok(addr) (access_ok(addr, 0)) 33 31 34 32 extern int __put_user_bad(void); 35 33 ··· 83 91 long __pu_err = -EFAULT; \ 84 92 typeof(*(ptr)) *__pu_addr = (ptr); \ 85 93 typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ 86 - if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr) \ 94 + if (access_ok(__pu_addr, size) && __pu_addr) \ 87 95 __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \ 88 96 __pu_err; \ 89 97 }) ··· 209 217 ({ \ 210 218 int __gu_err = -EFAULT; \ 211 219 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 212 - if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr) \ 220 + if (access_ok(__gu_ptr, size) && __gu_ptr) \ 213 221 __get_user_size(x, __gu_ptr, size, __gu_err); \ 214 222 __gu_err; \ 215 223 })
+1 -1
arch/csky/kernel/signal.c
··· 88 88 struct pt_regs *regs = current_pt_regs(); 89 89 struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp); 90 90 91 - if (verify_area(VERIFY_READ, frame, sizeof(*frame))) 91 + if (!access_ok(frame, sizeof(*frame))) 92 92 goto badframe; 93 93 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 94 94 goto badframe;
+4 -4
arch/csky/lib/usercopy.c
··· 7 7 unsigned long raw_copy_from_user(void *to, const void *from, 8 8 unsigned long n) 9 9 { 10 - if (access_ok(VERIFY_READ, from, n)) 10 + if (access_ok(from, n)) 11 11 __copy_user_zeroing(to, from, n); 12 12 else 13 13 memset(to, 0, n); ··· 18 18 unsigned long raw_copy_to_user(void *to, const void *from, 19 19 unsigned long n) 20 20 { 21 - if (access_ok(VERIFY_WRITE, to, n)) 21 + if (access_ok(to, n)) 22 22 __copy_user(to, from, n); 23 23 return n; 24 24 } ··· 113 113 { 114 114 long res = -EFAULT; 115 115 116 - if (access_ok(VERIFY_READ, src, 1)) 116 + if (access_ok(src, 1)) 117 117 __do_strncpy_from_user(dst, src, count, res); 118 118 return res; 119 119 } ··· 236 236 unsigned long 237 237 clear_user(void __user *to, unsigned long n) 238 238 { 239 - if (access_ok(VERIFY_WRITE, to, n)) 239 + if (access_ok(to, n)) 240 240 __do_clear_user(to, n); 241 241 return n; 242 242 }
+2 -2
arch/h8300/kernel/signal.c
··· 110 110 sigset_t set; 111 111 int er0; 112 112 113 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 113 + if (!access_ok(frame, sizeof(*frame))) 114 114 goto badframe; 115 115 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 116 116 goto badframe; ··· 165 165 166 166 frame = get_sigframe(ksig, regs, sizeof(*frame)); 167 167 168 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 168 + if (!access_ok(frame, sizeof(*frame))) 169 169 return -EFAULT; 170 170 171 171 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+1 -1
arch/hexagon/include/asm/futex.h
··· 77 77 int prev; 78 78 int ret; 79 79 80 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 80 + if (!access_ok(uaddr, sizeof(u32))) 81 81 return -EFAULT; 82 82 83 83 __asm__ __volatile__ (
-3
arch/hexagon/include/asm/uaccess.h
··· 29 29 30 30 /* 31 31 * access_ok: - Checks if a user space pointer is valid 32 - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 33 - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 34 - * to write to a block, it is always safe to read from it. 35 32 * @addr: User space pointer to start of block to check 36 33 * @size: Size of block to check 37 34 *
+2 -2
arch/hexagon/kernel/signal.c
··· 115 115 116 116 frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe)); 117 117 118 - if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe))) 118 + if (!access_ok(frame, sizeof(struct rt_sigframe))) 119 119 return -EFAULT; 120 120 121 121 if (copy_siginfo_to_user(&frame->info, &ksig->info)) ··· 244 244 current->restart_block.fn = do_no_restart_syscall; 245 245 246 246 frame = (struct rt_sigframe __user *)pt_psp(regs); 247 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 247 + if (!access_ok(frame, sizeof(*frame))) 248 248 goto badframe; 249 249 if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked))) 250 250 goto badframe;
+1 -1
arch/hexagon/mm/uaccess.c
··· 51 51 52 52 unsigned long clear_user_hexagon(void __user *dest, unsigned long count) 53 53 { 54 - if (!access_ok(VERIFY_WRITE, dest, count)) 54 + if (!access_ok(dest, count)) 55 55 return count; 56 56 else 57 57 return __clear_user_hexagon(dest, count);
+1 -1
arch/ia64/include/asm/futex.h
··· 86 86 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 87 87 u32 oldval, u32 newval) 88 88 { 89 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 89 + if (!access_ok(uaddr, sizeof(u32))) 90 90 return -EFAULT; 91 91 92 92 {
+1 -1
arch/ia64/include/asm/uaccess.h
··· 67 67 return likely(addr <= seg) && 68 68 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); 69 69 } 70 - #define access_ok(type, addr, size) __access_ok((addr), (size)) 70 + #define access_ok(addr, size) __access_ok((addr), (size)) 71 71 72 72 /* 73 73 * These are the main single-value transfer routines. They automatically
+2 -2
arch/ia64/kernel/ptrace.c
··· 836 836 char nat = 0; 837 837 int i; 838 838 839 - if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs))) 839 + if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) 840 840 return -EIO; 841 841 842 842 pt = task_pt_regs(child); ··· 981 981 982 982 memset(&fpval, 0, sizeof(fpval)); 983 983 984 - if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs))) 984 + if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) 985 985 return -EIO; 986 986 987 987 pt = task_pt_regs(child);
+2 -2
arch/ia64/kernel/signal.c
··· 132 132 */ 133 133 retval = (long) &ia64_strace_leave_kernel; 134 134 135 - if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) 135 + if (!access_ok(sc, sizeof(*sc))) 136 136 goto give_sigsegv; 137 137 138 138 if (GET_SIGSET(&set, &sc->sc_mask)) ··· 264 264 } 265 265 frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); 266 266 267 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { 267 + if (!access_ok(frame, sizeof(*frame))) { 268 268 force_sigsegv(ksig->sig, current); 269 269 return 1; 270 270 }
+1 -1
arch/m68k/include/asm/uaccess_mm.h
··· 10 10 #include <asm/segment.h> 11 11 12 12 /* We let the MMU do all checking */ 13 - static inline int access_ok(int type, const void __user *addr, 13 + static inline int access_ok(const void __user *addr, 14 14 unsigned long size) 15 15 { 16 16 return 1;
+1 -1
arch/m68k/include/asm/uaccess_no.h
··· 10 10 11 11 #include <asm/segment.h> 12 12 13 - #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) 13 + #define access_ok(addr,size) _access_ok((unsigned long)(addr),(size)) 14 14 15 15 /* 16 16 * It is not enough to just have access_ok check for a real RAM address.
+2 -2
arch/m68k/kernel/signal.c
··· 787 787 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); 788 788 sigset_t set; 789 789 790 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 790 + if (!access_ok(frame, sizeof(*frame))) 791 791 goto badframe; 792 792 if (__get_user(set.sig[0], &frame->sc.sc_mask) || 793 793 (_NSIG_WORDS > 1 && ··· 812 812 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); 813 813 sigset_t set; 814 814 815 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 815 + if (!access_ok(frame, sizeof(*frame))) 816 816 goto badframe; 817 817 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 818 818 goto badframe;
+1 -1
arch/microblaze/include/asm/futex.h
··· 71 71 int ret = 0, cmp; 72 72 u32 prev; 73 73 74 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 74 + if (!access_ok(uaddr, sizeof(u32))) 75 75 return -EFAULT; 76 76 77 77 __asm__ __volatile__ ("1: lwx %1, %3, r0; \
+11 -12
arch/microblaze/include/asm/uaccess.h
··· 60 60 #define __range_ok(addr, size) \ 61 61 ___range_ok((unsigned long)(addr), (unsigned long)(size)) 62 62 63 - #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 63 + #define access_ok(addr, size) (__range_ok((addr), (size)) == 0) 64 64 65 65 #else 66 66 67 - static inline int access_ok(int type, const void __user *addr, 68 - unsigned long size) 67 + static inline int access_ok(const void __user *addr, unsigned long size) 69 68 { 70 69 if (!size) 71 70 goto ok; 72 71 73 72 if ((get_fs().seg < ((unsigned long)addr)) || 74 73 (get_fs().seg < ((unsigned long)addr + size - 1))) { 75 - pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", 76 - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, 74 + pr_devel("ACCESS fail at 0x%08x (size 0x%x), seg 0x%08x\n", 75 + (__force u32)addr, (u32)size, 77 76 (u32)get_fs().seg); 78 77 return 0; 79 78 } 80 79 ok: 81 - pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", 82 - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, 80 + pr_devel("ACCESS OK at 0x%08x (size 0x%x), seg 0x%08x\n", 81 + (__force u32)addr, (u32)size, 83 82 (u32)get_fs().seg); 84 83 return 1; 85 84 } ··· 119 120 unsigned long n) 120 121 { 121 122 might_fault(); 122 - if (unlikely(!access_ok(VERIFY_WRITE, to, n))) 123 + if (unlikely(!access_ok(to, n))) 123 124 return n; 124 125 125 126 return __clear_user(to, n); ··· 173 174 const typeof(*(ptr)) __user *__gu_addr = (ptr); \ 174 175 int __gu_err = 0; \ 175 176 \ 176 - if (access_ok(VERIFY_READ, __gu_addr, size)) { \ 177 + if (access_ok(__gu_addr, size)) { \ 177 178 switch (size) { \ 178 179 case 1: \ 179 180 __get_user_asm("lbu", __gu_addr, __gu_val, \ ··· 285 286 typeof(*(ptr)) __user *__pu_addr = (ptr); \ 286 287 int __pu_err = 0; \ 287 288 \ 288 - if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \ 289 + if (access_ok(__pu_addr, size)) { \ 289 290 switch (size) { \ 290 291 case 1: \ 291 292 __put_user_asm("sb", __pu_addr, __pu_val, \ ··· 357 358 static inline 
long 358 359 strncpy_from_user(char *dst, const char __user *src, long count) 359 360 { 360 - if (!access_ok(VERIFY_READ, src, 1)) 361 + if (!access_ok(src, 1)) 361 362 return -EFAULT; 362 363 return __strncpy_user(dst, src, count); 363 364 } ··· 371 372 372 373 static inline long strnlen_user(const char __user *src, long n) 373 374 { 374 - if (!access_ok(VERIFY_READ, src, 1)) 375 + if (!access_ok(src, 1)) 375 376 return 0; 376 377 return __strnlen_user(src, n); 377 378 }
+2 -2
arch/microblaze/kernel/signal.c
··· 91 91 /* Always make any pending restarted system calls return -EINTR */ 92 92 current->restart_block.fn = do_no_restart_syscall; 93 93 94 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 94 + if (!access_ok(frame, sizeof(*frame))) 95 95 goto badframe; 96 96 97 97 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ··· 166 166 167 167 frame = get_sigframe(ksig, regs, sizeof(*frame)); 168 168 169 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 169 + if (!access_ok(frame, sizeof(*frame))) 170 170 return -EFAULT; 171 171 172 172 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+2 -2
arch/mips/include/asm/checksum.h
··· 63 63 __wsum csum_and_copy_from_user(const void __user *src, void *dst, 64 64 int len, __wsum sum, int *err_ptr) 65 65 { 66 - if (access_ok(VERIFY_READ, src, len)) 66 + if (access_ok(src, len)) 67 67 return csum_partial_copy_from_user(src, dst, len, sum, 68 68 err_ptr); 69 69 if (len) ··· 81 81 __wsum sum, int *err_ptr) 82 82 { 83 83 might_fault(); 84 - if (access_ok(VERIFY_WRITE, dst, len)) { 84 + if (access_ok(dst, len)) { 85 85 if (uaccess_kernel()) 86 86 return __csum_partial_copy_kernel(src, 87 87 (__force void *)dst,
+1 -1
arch/mips/include/asm/futex.h
··· 129 129 int ret = 0; 130 130 u32 val; 131 131 132 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 132 + if (!access_ok(uaddr, sizeof(u32))) 133 133 return -EFAULT; 134 134 135 135 if (cpu_has_llsc && R10000_LLSC_WAR) {
+2 -2
arch/mips/include/asm/termios.h
··· 32 32 unsigned short iflag, oflag, cflag, lflag; 33 33 unsigned int err; 34 34 35 - if (!access_ok(VERIFY_READ, termio, sizeof(struct termio))) 35 + if (!access_ok(termio, sizeof(struct termio))) 36 36 return -EFAULT; 37 37 38 38 err = __get_user(iflag, &termio->c_iflag); ··· 61 61 { 62 62 int err; 63 63 64 - if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio))) 64 + if (!access_ok(termio, sizeof(struct termio))) 65 65 return -EFAULT; 66 66 67 67 err = __put_user(termios->c_iflag, &termio->c_iflag);
+4 -8
arch/mips/include/asm/uaccess.h
··· 109 109 110 110 /* 111 111 * access_ok: - Checks if a user space pointer is valid 112 - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 113 - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 114 - * to write to a block, it is always safe to read from it. 115 112 * @addr: User space pointer to start of block to check 116 113 * @size: Size of block to check 117 114 * ··· 131 134 return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; 132 135 } 133 136 134 - #define access_ok(type, addr, size) \ 137 + #define access_ok(addr, size) \ 135 138 likely(__access_ok((addr), (size))) 136 139 137 140 /* ··· 301 304 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 302 305 \ 303 306 might_fault(); \ 304 - if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \ 307 + if (likely(access_ok( __gu_ptr, size))) { \ 305 308 if (eva_kernel_access()) \ 306 309 __get_kernel_common((x), size, __gu_ptr); \ 307 310 else \ ··· 443 446 int __pu_err = -EFAULT; \ 444 447 \ 445 448 might_fault(); \ 446 - if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 449 + if (likely(access_ok( __pu_addr, size))) { \ 447 450 if (eva_kernel_access()) \ 448 451 __put_kernel_common(__pu_addr, size); \ 449 452 else \ ··· 688 691 ({ \ 689 692 void __user * __cl_addr = (addr); \ 690 693 unsigned long __cl_size = (n); \ 691 - if (__cl_size && access_ok(VERIFY_WRITE, \ 692 - __cl_addr, __cl_size)) \ 694 + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ 693 695 __cl_size = __clear_user(__cl_addr, __cl_size); \ 694 696 __cl_size; \ 695 697 })
+12 -12
arch/mips/kernel/mips-r2-to-r6-emul.c
··· 1205 1205 case lwl_op: 1206 1206 rt = regs->regs[MIPSInst_RT(inst)]; 1207 1207 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1208 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1208 + if (!access_ok((void __user *)vaddr, 4)) { 1209 1209 current->thread.cp0_baduaddr = vaddr; 1210 1210 err = SIGSEGV; 1211 1211 break; ··· 1278 1278 case lwr_op: 1279 1279 rt = regs->regs[MIPSInst_RT(inst)]; 1280 1280 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1281 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1281 + if (!access_ok((void __user *)vaddr, 4)) { 1282 1282 current->thread.cp0_baduaddr = vaddr; 1283 1283 err = SIGSEGV; 1284 1284 break; ··· 1352 1352 case swl_op: 1353 1353 rt = regs->regs[MIPSInst_RT(inst)]; 1354 1354 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1355 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 1355 + if (!access_ok((void __user *)vaddr, 4)) { 1356 1356 current->thread.cp0_baduaddr = vaddr; 1357 1357 err = SIGSEGV; 1358 1358 break; ··· 1422 1422 case swr_op: 1423 1423 rt = regs->regs[MIPSInst_RT(inst)]; 1424 1424 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1425 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 1425 + if (!access_ok((void __user *)vaddr, 4)) { 1426 1426 current->thread.cp0_baduaddr = vaddr; 1427 1427 err = SIGSEGV; 1428 1428 break; ··· 1497 1497 1498 1498 rt = regs->regs[MIPSInst_RT(inst)]; 1499 1499 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1500 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 1500 + if (!access_ok((void __user *)vaddr, 8)) { 1501 1501 current->thread.cp0_baduaddr = vaddr; 1502 1502 err = SIGSEGV; 1503 1503 break; ··· 1616 1616 1617 1617 rt = regs->regs[MIPSInst_RT(inst)]; 1618 1618 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1619 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 1619 + if (!access_ok((void __user *)vaddr, 8)) { 1620 1620 
current->thread.cp0_baduaddr = vaddr; 1621 1621 err = SIGSEGV; 1622 1622 break; ··· 1735 1735 1736 1736 rt = regs->regs[MIPSInst_RT(inst)]; 1737 1737 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1738 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 1738 + if (!access_ok((void __user *)vaddr, 8)) { 1739 1739 current->thread.cp0_baduaddr = vaddr; 1740 1740 err = SIGSEGV; 1741 1741 break; ··· 1853 1853 1854 1854 rt = regs->regs[MIPSInst_RT(inst)]; 1855 1855 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1856 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 1856 + if (!access_ok((void __user *)vaddr, 8)) { 1857 1857 current->thread.cp0_baduaddr = vaddr; 1858 1858 err = SIGSEGV; 1859 1859 break; ··· 1970 1970 err = SIGBUS; 1971 1971 break; 1972 1972 } 1973 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1973 + if (!access_ok((void __user *)vaddr, 4)) { 1974 1974 current->thread.cp0_baduaddr = vaddr; 1975 1975 err = SIGBUS; 1976 1976 break; ··· 2026 2026 err = SIGBUS; 2027 2027 break; 2028 2028 } 2029 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 2029 + if (!access_ok((void __user *)vaddr, 4)) { 2030 2030 current->thread.cp0_baduaddr = vaddr; 2031 2031 err = SIGBUS; 2032 2032 break; ··· 2089 2089 err = SIGBUS; 2090 2090 break; 2091 2091 } 2092 - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 2092 + if (!access_ok((void __user *)vaddr, 8)) { 2093 2093 current->thread.cp0_baduaddr = vaddr; 2094 2094 err = SIGBUS; 2095 2095 break; ··· 2150 2150 err = SIGBUS; 2151 2151 break; 2152 2152 } 2153 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 2153 + if (!access_ok((void __user *)vaddr, 8)) { 2154 2154 current->thread.cp0_baduaddr = vaddr; 2155 2155 err = SIGBUS; 2156 2156 break;
+6 -6
arch/mips/kernel/ptrace.c
··· 71 71 struct pt_regs *regs; 72 72 int i; 73 73 74 - if (!access_ok(VERIFY_WRITE, data, 38 * 8)) 74 + if (!access_ok(data, 38 * 8)) 75 75 return -EIO; 76 76 77 77 regs = task_pt_regs(child); ··· 98 98 struct pt_regs *regs; 99 99 int i; 100 100 101 - if (!access_ok(VERIFY_READ, data, 38 * 8)) 101 + if (!access_ok(data, 38 * 8)) 102 102 return -EIO; 103 103 104 104 regs = task_pt_regs(child); ··· 125 125 126 126 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) 127 127 return -EIO; 128 - if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) 128 + if (!access_ok(addr, sizeof(struct pt_watch_regs))) 129 129 return -EIO; 130 130 131 131 #ifdef CONFIG_32BIT ··· 167 167 168 168 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) 169 169 return -EIO; 170 - if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) 170 + if (!access_ok(addr, sizeof(struct pt_watch_regs))) 171 171 return -EIO; 172 172 /* Check the values. */ 173 173 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { ··· 359 359 { 360 360 int i; 361 361 362 - if (!access_ok(VERIFY_WRITE, data, 33 * 8)) 362 + if (!access_ok(data, 33 * 8)) 363 363 return -EIO; 364 364 365 365 if (tsk_used_math(child)) { ··· 385 385 u32 value; 386 386 int i; 387 387 388 - if (!access_ok(VERIFY_READ, data, 33 * 8)) 388 + if (!access_ok(data, 33 * 8)) 389 389 return -EIO; 390 390 391 391 init_fp_ctx(child);
+6 -6
arch/mips/kernel/signal.c
··· 590 590 if (act) { 591 591 old_sigset_t mask; 592 592 593 - if (!access_ok(VERIFY_READ, act, sizeof(*act))) 593 + if (!access_ok(act, sizeof(*act))) 594 594 return -EFAULT; 595 595 err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler); 596 596 err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); ··· 604 604 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 605 605 606 606 if (!ret && oact) { 607 - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 607 + if (!access_ok(oact, sizeof(*oact))) 608 608 return -EFAULT; 609 609 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 610 610 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); ··· 630 630 631 631 regs = current_pt_regs(); 632 632 frame = (struct sigframe __user *)regs->regs[29]; 633 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 633 + if (!access_ok(frame, sizeof(*frame))) 634 634 goto badframe; 635 635 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) 636 636 goto badframe; ··· 667 667 668 668 regs = current_pt_regs(); 669 669 frame = (struct rt_sigframe __user *)regs->regs[29]; 670 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 670 + if (!access_ok(frame, sizeof(*frame))) 671 671 goto badframe; 672 672 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) 673 673 goto badframe; ··· 705 705 int err = 0; 706 706 707 707 frame = get_sigframe(ksig, regs, sizeof(*frame)); 708 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 708 + if (!access_ok(frame, sizeof (*frame))) 709 709 return -EFAULT; 710 710 711 711 err |= setup_sigcontext(regs, &frame->sf_sc); ··· 744 744 int err = 0; 745 745 746 746 frame = get_sigframe(ksig, regs, sizeof(*frame)); 747 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 747 + if (!access_ok(frame, sizeof (*frame))) 748 748 return -EFAULT; 749 749 750 750 /* Create siginfo. */
+2 -2
arch/mips/kernel/signal32.c
··· 46 46 old_sigset_t mask; 47 47 s32 handler; 48 48 49 - if (!access_ok(VERIFY_READ, act, sizeof(*act))) 49 + if (!access_ok(act, sizeof(*act))) 50 50 return -EFAULT; 51 51 err |= __get_user(handler, &act->sa_handler); 52 52 new_ka.sa.sa_handler = (void __user *)(s64)handler; ··· 61 61 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 62 62 63 63 if (!ret && oact) { 64 - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 64 + if (!access_ok(oact, sizeof(*oact))) 65 65 return -EFAULT; 66 66 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 67 67 err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
+2 -2
arch/mips/kernel/signal_n32.c
··· 73 73 74 74 regs = current_pt_regs(); 75 75 frame = (struct rt_sigframe_n32 __user *)regs->regs[29]; 76 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 76 + if (!access_ok(frame, sizeof(*frame))) 77 77 goto badframe; 78 78 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 79 79 goto badframe; ··· 110 110 int err = 0; 111 111 112 112 frame = get_sigframe(ksig, regs, sizeof(*frame)); 113 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 113 + if (!access_ok(frame, sizeof (*frame))) 114 114 return -EFAULT; 115 115 116 116 /* Create siginfo. */
+4 -4
arch/mips/kernel/signal_o32.c
··· 118 118 int err = 0; 119 119 120 120 frame = get_sigframe(ksig, regs, sizeof(*frame)); 121 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 121 + if (!access_ok(frame, sizeof (*frame))) 122 122 return -EFAULT; 123 123 124 124 err |= setup_sigcontext32(regs, &frame->sf_sc); ··· 160 160 161 161 regs = current_pt_regs(); 162 162 frame = (struct rt_sigframe32 __user *)regs->regs[29]; 163 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 163 + if (!access_ok(frame, sizeof(*frame))) 164 164 goto badframe; 165 165 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 166 166 goto badframe; ··· 197 197 int err = 0; 198 198 199 199 frame = get_sigframe(ksig, regs, sizeof(*frame)); 200 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 200 + if (!access_ok(frame, sizeof (*frame))) 201 201 return -EFAULT; 202 202 203 203 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ ··· 262 262 263 263 regs = current_pt_regs(); 264 264 frame = (struct sigframe32 __user *)regs->regs[29]; 265 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 265 + if (!access_ok(frame, sizeof(*frame))) 266 266 goto badframe; 267 267 if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) 268 268 goto badframe;
+1 -1
arch/mips/kernel/syscall.c
··· 101 101 if (unlikely(addr & 3)) 102 102 return -EINVAL; 103 103 104 - if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4))) 104 + if (unlikely(!access_ok((const void __user *)addr, 4))) 105 105 return -EINVAL; 106 106 107 107 if (cpu_has_llsc && R10000_LLSC_WAR) {
+47 -51
arch/mips/kernel/unaligned.c
··· 936 936 if (insn.dsp_format.func == lx_op) { 937 937 switch (insn.dsp_format.op) { 938 938 case lwx_op: 939 - if (!access_ok(VERIFY_READ, addr, 4)) 939 + if (!access_ok(addr, 4)) 940 940 goto sigbus; 941 941 LoadW(addr, value, res); 942 942 if (res) ··· 945 945 regs->regs[insn.dsp_format.rd] = value; 946 946 break; 947 947 case lhx_op: 948 - if (!access_ok(VERIFY_READ, addr, 2)) 948 + if (!access_ok(addr, 2)) 949 949 goto sigbus; 950 950 LoadHW(addr, value, res); 951 951 if (res) ··· 968 968 set_fs(USER_DS); 969 969 switch (insn.spec3_format.func) { 970 970 case lhe_op: 971 - if (!access_ok(VERIFY_READ, addr, 2)) { 971 + if (!access_ok(addr, 2)) { 972 972 set_fs(seg); 973 973 goto sigbus; 974 974 } ··· 981 981 regs->regs[insn.spec3_format.rt] = value; 982 982 break; 983 983 case lwe_op: 984 - if (!access_ok(VERIFY_READ, addr, 4)) { 984 + if (!access_ok(addr, 4)) { 985 985 set_fs(seg); 986 986 goto sigbus; 987 987 } ··· 994 994 regs->regs[insn.spec3_format.rt] = value; 995 995 break; 996 996 case lhue_op: 997 - if (!access_ok(VERIFY_READ, addr, 2)) { 997 + if (!access_ok(addr, 2)) { 998 998 set_fs(seg); 999 999 goto sigbus; 1000 1000 } ··· 1007 1007 regs->regs[insn.spec3_format.rt] = value; 1008 1008 break; 1009 1009 case she_op: 1010 - if (!access_ok(VERIFY_WRITE, addr, 2)) { 1010 + if (!access_ok(addr, 2)) { 1011 1011 set_fs(seg); 1012 1012 goto sigbus; 1013 1013 } ··· 1020 1020 } 1021 1021 break; 1022 1022 case swe_op: 1023 - if (!access_ok(VERIFY_WRITE, addr, 4)) { 1023 + if (!access_ok(addr, 4)) { 1024 1024 set_fs(seg); 1025 1025 goto sigbus; 1026 1026 } ··· 1041 1041 #endif 1042 1042 break; 1043 1043 case lh_op: 1044 - if (!access_ok(VERIFY_READ, addr, 2)) 1044 + if (!access_ok(addr, 2)) 1045 1045 goto sigbus; 1046 1046 1047 1047 if (IS_ENABLED(CONFIG_EVA)) { ··· 1060 1060 break; 1061 1061 1062 1062 case lw_op: 1063 - if (!access_ok(VERIFY_READ, addr, 4)) 1063 + if (!access_ok(addr, 4)) 1064 1064 goto sigbus; 1065 1065 1066 1066 if (IS_ENABLED(CONFIG_EVA)) 
{ ··· 1079 1079 break; 1080 1080 1081 1081 case lhu_op: 1082 - if (!access_ok(VERIFY_READ, addr, 2)) 1082 + if (!access_ok(addr, 2)) 1083 1083 goto sigbus; 1084 1084 1085 1085 if (IS_ENABLED(CONFIG_EVA)) { ··· 1106 1106 * would blow up, so for now we don't handle unaligned 64-bit 1107 1107 * instructions on 32-bit kernels. 1108 1108 */ 1109 - if (!access_ok(VERIFY_READ, addr, 4)) 1109 + if (!access_ok(addr, 4)) 1110 1110 goto sigbus; 1111 1111 1112 1112 LoadWU(addr, value, res); ··· 1129 1129 * would blow up, so for now we don't handle unaligned 64-bit 1130 1130 * instructions on 32-bit kernels. 1131 1131 */ 1132 - if (!access_ok(VERIFY_READ, addr, 8)) 1132 + if (!access_ok(addr, 8)) 1133 1133 goto sigbus; 1134 1134 1135 1135 LoadDW(addr, value, res); ··· 1144 1144 goto sigill; 1145 1145 1146 1146 case sh_op: 1147 - if (!access_ok(VERIFY_WRITE, addr, 2)) 1147 + if (!access_ok(addr, 2)) 1148 1148 goto sigbus; 1149 1149 1150 1150 compute_return_epc(regs); ··· 1164 1164 break; 1165 1165 1166 1166 case sw_op: 1167 - if (!access_ok(VERIFY_WRITE, addr, 4)) 1167 + if (!access_ok(addr, 4)) 1168 1168 goto sigbus; 1169 1169 1170 1170 compute_return_epc(regs); ··· 1192 1192 * would blow up, so for now we don't handle unaligned 64-bit 1193 1193 * instructions on 32-bit kernels. 
1194 1194 */ 1195 - if (!access_ok(VERIFY_WRITE, addr, 8)) 1195 + if (!access_ok(addr, 8)) 1196 1196 goto sigbus; 1197 1197 1198 1198 compute_return_epc(regs); ··· 1254 1254 1255 1255 switch (insn.msa_mi10_format.func) { 1256 1256 case msa_ld_op: 1257 - if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1257 + if (!access_ok(addr, sizeof(*fpr))) 1258 1258 goto sigbus; 1259 1259 1260 1260 do { ··· 1290 1290 break; 1291 1291 1292 1292 case msa_st_op: 1293 - if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr))) 1293 + if (!access_ok(addr, sizeof(*fpr))) 1294 1294 goto sigbus; 1295 1295 1296 1296 /* ··· 1463 1463 if (reg == 31) 1464 1464 goto sigbus; 1465 1465 1466 - if (!access_ok(VERIFY_READ, addr, 8)) 1466 + if (!access_ok(addr, 8)) 1467 1467 goto sigbus; 1468 1468 1469 1469 LoadW(addr, value, res); ··· 1482 1482 if (reg == 31) 1483 1483 goto sigbus; 1484 1484 1485 - if (!access_ok(VERIFY_WRITE, addr, 8)) 1485 + if (!access_ok(addr, 8)) 1486 1486 goto sigbus; 1487 1487 1488 1488 value = regs->regs[reg]; ··· 1502 1502 if (reg == 31) 1503 1503 goto sigbus; 1504 1504 1505 - if (!access_ok(VERIFY_READ, addr, 16)) 1505 + if (!access_ok(addr, 16)) 1506 1506 goto sigbus; 1507 1507 1508 1508 LoadDW(addr, value, res); ··· 1525 1525 if (reg == 31) 1526 1526 goto sigbus; 1527 1527 1528 - if (!access_ok(VERIFY_WRITE, addr, 16)) 1528 + if (!access_ok(addr, 16)) 1529 1529 goto sigbus; 1530 1530 1531 1531 value = regs->regs[reg]; ··· 1548 1548 if ((rvar > 9) || !reg) 1549 1549 goto sigill; 1550 1550 if (reg & 0x10) { 1551 - if (!access_ok 1552 - (VERIFY_READ, addr, 4 * (rvar + 1))) 1551 + if (!access_ok(addr, 4 * (rvar + 1))) 1553 1552 goto sigbus; 1554 1553 } else { 1555 - if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1554 + if (!access_ok(addr, 4 * rvar)) 1556 1555 goto sigbus; 1557 1556 } 1558 1557 if (rvar == 9) ··· 1584 1585 if ((rvar > 9) || !reg) 1585 1586 goto sigill; 1586 1587 if (reg & 0x10) { 1587 - if (!access_ok 1588 - (VERIFY_WRITE, addr, 4 * (rvar + 1))) 1588 + if 
(!access_ok(addr, 4 * (rvar + 1))) 1589 1589 goto sigbus; 1590 1590 } else { 1591 - if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1591 + if (!access_ok(addr, 4 * rvar)) 1592 1592 goto sigbus; 1593 1593 } 1594 1594 if (rvar == 9) ··· 1621 1623 if ((rvar > 9) || !reg) 1622 1624 goto sigill; 1623 1625 if (reg & 0x10) { 1624 - if (!access_ok 1625 - (VERIFY_READ, addr, 8 * (rvar + 1))) 1626 + if (!access_ok(addr, 8 * (rvar + 1))) 1626 1627 goto sigbus; 1627 1628 } else { 1628 - if (!access_ok(VERIFY_READ, addr, 8 * rvar)) 1629 + if (!access_ok(addr, 8 * rvar)) 1629 1630 goto sigbus; 1630 1631 } 1631 1632 if (rvar == 9) ··· 1662 1665 if ((rvar > 9) || !reg) 1663 1666 goto sigill; 1664 1667 if (reg & 0x10) { 1665 - if (!access_ok 1666 - (VERIFY_WRITE, addr, 8 * (rvar + 1))) 1668 + if (!access_ok(addr, 8 * (rvar + 1))) 1667 1669 goto sigbus; 1668 1670 } else { 1669 - if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) 1671 + if (!access_ok(addr, 8 * rvar)) 1670 1672 goto sigbus; 1671 1673 } 1672 1674 if (rvar == 9) ··· 1784 1788 case mm_lwm16_op: 1785 1789 reg = insn.mm16_m_format.rlist; 1786 1790 rvar = reg + 1; 1787 - if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1791 + if (!access_ok(addr, 4 * rvar)) 1788 1792 goto sigbus; 1789 1793 1790 1794 for (i = 16; rvar; rvar--, i++) { ··· 1804 1808 case mm_swm16_op: 1805 1809 reg = insn.mm16_m_format.rlist; 1806 1810 rvar = reg + 1; 1807 - if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1811 + if (!access_ok(addr, 4 * rvar)) 1808 1812 goto sigbus; 1809 1813 1810 1814 for (i = 16; rvar; rvar--, i++) { ··· 1858 1862 } 1859 1863 1860 1864 loadHW: 1861 - if (!access_ok(VERIFY_READ, addr, 2)) 1865 + if (!access_ok(addr, 2)) 1862 1866 goto sigbus; 1863 1867 1864 1868 LoadHW(addr, value, res); ··· 1868 1872 goto success; 1869 1873 1870 1874 loadHWU: 1871 - if (!access_ok(VERIFY_READ, addr, 2)) 1875 + if (!access_ok(addr, 2)) 1872 1876 goto sigbus; 1873 1877 1874 1878 LoadHWU(addr, value, res); ··· 1878 1882 goto success; 1879 1883 1880 1884 
loadW: 1881 - if (!access_ok(VERIFY_READ, addr, 4)) 1885 + if (!access_ok(addr, 4)) 1882 1886 goto sigbus; 1883 1887 1884 1888 LoadW(addr, value, res); ··· 1896 1900 * would blow up, so for now we don't handle unaligned 64-bit 1897 1901 * instructions on 32-bit kernels. 1898 1902 */ 1899 - if (!access_ok(VERIFY_READ, addr, 4)) 1903 + if (!access_ok(addr, 4)) 1900 1904 goto sigbus; 1901 1905 1902 1906 LoadWU(addr, value, res); ··· 1918 1922 * would blow up, so for now we don't handle unaligned 64-bit 1919 1923 * instructions on 32-bit kernels. 1920 1924 */ 1921 - if (!access_ok(VERIFY_READ, addr, 8)) 1925 + if (!access_ok(addr, 8)) 1922 1926 goto sigbus; 1923 1927 1924 1928 LoadDW(addr, value, res); ··· 1932 1936 goto sigill; 1933 1937 1934 1938 storeHW: 1935 - if (!access_ok(VERIFY_WRITE, addr, 2)) 1939 + if (!access_ok(addr, 2)) 1936 1940 goto sigbus; 1937 1941 1938 1942 value = regs->regs[reg]; ··· 1942 1946 goto success; 1943 1947 1944 1948 storeW: 1945 - if (!access_ok(VERIFY_WRITE, addr, 4)) 1949 + if (!access_ok(addr, 4)) 1946 1950 goto sigbus; 1947 1951 1948 1952 value = regs->regs[reg]; ··· 1960 1964 * would blow up, so for now we don't handle unaligned 64-bit 1961 1965 * instructions on 32-bit kernels. 
1962 1966 */ 1963 - if (!access_ok(VERIFY_WRITE, addr, 8)) 1967 + if (!access_ok(addr, 8)) 1964 1968 goto sigbus; 1965 1969 1966 1970 value = regs->regs[reg]; ··· 2118 2122 goto sigbus; 2119 2123 2120 2124 case MIPS16e_lh_op: 2121 - if (!access_ok(VERIFY_READ, addr, 2)) 2125 + if (!access_ok(addr, 2)) 2122 2126 goto sigbus; 2123 2127 2124 2128 LoadHW(addr, value, res); ··· 2129 2133 break; 2130 2134 2131 2135 case MIPS16e_lhu_op: 2132 - if (!access_ok(VERIFY_READ, addr, 2)) 2136 + if (!access_ok(addr, 2)) 2133 2137 goto sigbus; 2134 2138 2135 2139 LoadHWU(addr, value, res); ··· 2142 2146 case MIPS16e_lw_op: 2143 2147 case MIPS16e_lwpc_op: 2144 2148 case MIPS16e_lwsp_op: 2145 - if (!access_ok(VERIFY_READ, addr, 4)) 2149 + if (!access_ok(addr, 4)) 2146 2150 goto sigbus; 2147 2151 2148 2152 LoadW(addr, value, res); ··· 2161 2165 * would blow up, so for now we don't handle unaligned 64-bit 2162 2166 * instructions on 32-bit kernels. 2163 2167 */ 2164 - if (!access_ok(VERIFY_READ, addr, 4)) 2168 + if (!access_ok(addr, 4)) 2165 2169 goto sigbus; 2166 2170 2167 2171 LoadWU(addr, value, res); ··· 2185 2189 * would blow up, so for now we don't handle unaligned 64-bit 2186 2190 * instructions on 32-bit kernels. 
2187 2191 */ 2188 - if (!access_ok(VERIFY_READ, addr, 8)) 2192 + if (!access_ok(addr, 8)) 2189 2193 goto sigbus; 2190 2194 2191 2195 LoadDW(addr, value, res); ··· 2200 2204 goto sigill; 2201 2205 2202 2206 case MIPS16e_sh_op: 2203 - if (!access_ok(VERIFY_WRITE, addr, 2)) 2207 + if (!access_ok(addr, 2)) 2204 2208 goto sigbus; 2205 2209 2206 2210 MIPS16e_compute_return_epc(regs, &oldinst); ··· 2213 2217 case MIPS16e_sw_op: 2214 2218 case MIPS16e_swsp_op: 2215 2219 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 2216 - if (!access_ok(VERIFY_WRITE, addr, 4)) 2220 + if (!access_ok(addr, 4)) 2217 2221 goto sigbus; 2218 2222 2219 2223 MIPS16e_compute_return_epc(regs, &oldinst); ··· 2233 2237 * would blow up, so for now we don't handle unaligned 64-bit 2234 2238 * instructions on 32-bit kernels. 2235 2239 */ 2236 - if (!access_ok(VERIFY_WRITE, addr, 8)) 2240 + if (!access_ok(addr, 8)) 2237 2241 goto sigbus; 2238 2242 2239 2243 MIPS16e_compute_return_epc(regs, &oldinst);
+8 -8
arch/mips/math-emu/cp1emu.c
··· 1063 1063 MIPSInst_SIMM(ir)); 1064 1064 MIPS_FPU_EMU_INC_STATS(loads); 1065 1065 1066 - if (!access_ok(VERIFY_READ, dva, sizeof(u64))) { 1066 + if (!access_ok(dva, sizeof(u64))) { 1067 1067 MIPS_FPU_EMU_INC_STATS(errors); 1068 1068 *fault_addr = dva; 1069 1069 return SIGBUS; ··· 1081 1081 MIPSInst_SIMM(ir)); 1082 1082 MIPS_FPU_EMU_INC_STATS(stores); 1083 1083 DIFROMREG(dval, MIPSInst_RT(ir)); 1084 - if (!access_ok(VERIFY_WRITE, dva, sizeof(u64))) { 1084 + if (!access_ok(dva, sizeof(u64))) { 1085 1085 MIPS_FPU_EMU_INC_STATS(errors); 1086 1086 *fault_addr = dva; 1087 1087 return SIGBUS; ··· 1097 1097 wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + 1098 1098 MIPSInst_SIMM(ir)); 1099 1099 MIPS_FPU_EMU_INC_STATS(loads); 1100 - if (!access_ok(VERIFY_READ, wva, sizeof(u32))) { 1100 + if (!access_ok(wva, sizeof(u32))) { 1101 1101 MIPS_FPU_EMU_INC_STATS(errors); 1102 1102 *fault_addr = wva; 1103 1103 return SIGBUS; ··· 1115 1115 MIPSInst_SIMM(ir)); 1116 1116 MIPS_FPU_EMU_INC_STATS(stores); 1117 1117 SIFROMREG(wval, MIPSInst_RT(ir)); 1118 - if (!access_ok(VERIFY_WRITE, wva, sizeof(u32))) { 1118 + if (!access_ok(wva, sizeof(u32))) { 1119 1119 MIPS_FPU_EMU_INC_STATS(errors); 1120 1120 *fault_addr = wva; 1121 1121 return SIGBUS; ··· 1493 1493 xcp->regs[MIPSInst_FT(ir)]); 1494 1494 1495 1495 MIPS_FPU_EMU_INC_STATS(loads); 1496 - if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 1496 + if (!access_ok(va, sizeof(u32))) { 1497 1497 MIPS_FPU_EMU_INC_STATS(errors); 1498 1498 *fault_addr = va; 1499 1499 return SIGBUS; ··· 1513 1513 MIPS_FPU_EMU_INC_STATS(stores); 1514 1514 1515 1515 SIFROMREG(val, MIPSInst_FS(ir)); 1516 - if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 1516 + if (!access_ok(va, sizeof(u32))) { 1517 1517 MIPS_FPU_EMU_INC_STATS(errors); 1518 1518 *fault_addr = va; 1519 1519 return SIGBUS; ··· 1590 1590 xcp->regs[MIPSInst_FT(ir)]); 1591 1591 1592 1592 MIPS_FPU_EMU_INC_STATS(loads); 1593 - if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 1593 + if (!access_ok(va, 
sizeof(u64))) { 1594 1594 MIPS_FPU_EMU_INC_STATS(errors); 1595 1595 *fault_addr = va; 1596 1596 return SIGBUS; ··· 1609 1609 1610 1610 MIPS_FPU_EMU_INC_STATS(stores); 1611 1611 DIFROMREG(val, MIPSInst_FS(ir)); 1612 - if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 1612 + if (!access_ok(va, sizeof(u64))) { 1613 1613 MIPS_FPU_EMU_INC_STATS(errors); 1614 1614 *fault_addr = va; 1615 1615 return SIGBUS;
+1 -1
arch/mips/mm/cache.c
··· 76 76 { 77 77 if (bytes == 0) 78 78 return 0; 79 - if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes)) 79 + if (!access_ok((void __user *) addr, bytes)) 80 80 return -EFAULT; 81 81 82 82 __flush_icache_user_range(addr, addr + bytes);
+1 -2
arch/mips/mm/gup.c
··· 195 195 addr = start; 196 196 len = (unsigned long) nr_pages << PAGE_SHIFT; 197 197 end = start + len; 198 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, 199 - (void __user *)start, len))) 198 + if (unlikely(!access_ok((void __user *)start, len))) 200 199 return 0; 201 200 202 201 /*
+1 -1
arch/mips/oprofile/backtrace.c
··· 19 19 static inline int get_mem(unsigned long addr, unsigned long *result) 20 20 { 21 21 unsigned long *address = (unsigned long *) addr; 22 - if (!access_ok(VERIFY_READ, address, sizeof(unsigned long))) 22 + if (!access_ok(address, sizeof(unsigned long))) 23 23 return -1; 24 24 if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) 25 25 return -3;
+1 -1
arch/mips/sibyte/common/sb_tbprof.c
··· 458 458 char *dest = buf; 459 459 long cur_off = *offp; 460 460 461 - if (!access_ok(VERIFY_WRITE, buf, size)) 461 + if (!access_ok(buf, size)) 462 462 return -EFAULT; 463 463 464 464 mutex_lock(&sbp.lock);
+1 -1
arch/nds32/include/asm/futex.h
··· 40 40 int ret = 0; 41 41 u32 val, tmp, flags; 42 42 43 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 43 + if (!access_ok(uaddr, sizeof(u32))) 44 44 return -EFAULT; 45 45 46 46 smp_mb();
+4 -7
arch/nds32/include/asm/uaccess.h
··· 13 13 #include <asm/types.h> 14 14 #include <linux/mm.h> 15 15 16 - #define VERIFY_READ 0 17 - #define VERIFY_WRITE 1 18 - 19 16 #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" 20 17 21 18 /* ··· 50 53 51 54 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) 52 55 53 - #define access_ok(type, addr, size) \ 56 + #define access_ok(addr, size) \ 54 57 __range_ok((unsigned long)addr, (unsigned long)size) 55 58 /* 56 59 * Single-value transfer routines. They automatically use the right ··· 91 94 ({ \ 92 95 const __typeof__(*(ptr)) __user *__p = (ptr); \ 93 96 might_fault(); \ 94 - if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 97 + if (access_ok(__p, sizeof(*__p))) { \ 95 98 __get_user_err((x), __p, (err)); \ 96 99 } else { \ 97 100 (x) = 0; (err) = -EFAULT; \ ··· 186 189 ({ \ 187 190 __typeof__(*(ptr)) __user *__p = (ptr); \ 188 191 might_fault(); \ 189 - if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 192 + if (access_ok(__p, sizeof(*__p))) { \ 190 193 __put_user_err((x), __p, (err)); \ 191 194 } else { \ 192 195 (err) = -EFAULT; \ ··· 276 279 #define INLINE_COPY_TO_USER 277 280 static inline unsigned long clear_user(void __user * to, unsigned long n) 278 281 { 279 - if (access_ok(VERIFY_WRITE, to, n)) 282 + if (access_ok(to, n)) 280 283 n = __arch_clear_user(to, n); 281 284 return n; 282 285 }
+5 -6
arch/nds32/kernel/perf_event_cpu.c
··· 1306 1306 (unsigned long *)(fp - (unsigned long)sizeof(buftail)); 1307 1307 1308 1308 /* Check accessibility of one struct frame_tail beyond */ 1309 - if (!access_ok(VERIFY_READ, user_frame_tail, sizeof(buftail))) 1309 + if (!access_ok(user_frame_tail, sizeof(buftail))) 1310 1310 return 0; 1311 1311 if (__copy_from_user_inatomic 1312 1312 (&buftail, user_frame_tail, sizeof(buftail))) ··· 1332 1332 (unsigned long *)(fp - (unsigned long)sizeof(buftail)); 1333 1333 1334 1334 /* Check accessibility of one struct frame_tail beyond */ 1335 - if (!access_ok(VERIFY_READ, user_frame_tail, sizeof(buftail))) 1335 + if (!access_ok(user_frame_tail, sizeof(buftail))) 1336 1336 return 0; 1337 1337 if (__copy_from_user_inatomic 1338 1338 (&buftail, user_frame_tail, sizeof(buftail))) ··· 1386 1386 user_frame_tail = 1387 1387 (unsigned long *)(fp - (unsigned long)sizeof(fp)); 1388 1388 1389 - if (!access_ok(VERIFY_READ, user_frame_tail, sizeof(fp))) 1389 + if (!access_ok(user_frame_tail, sizeof(fp))) 1390 1390 return; 1391 1391 1392 1392 if (__copy_from_user_inatomic ··· 1406 1406 (unsigned long *)(fp - 1407 1407 (unsigned long)sizeof(buftail)); 1408 1408 1409 - if (!access_ok 1410 - (VERIFY_READ, user_frame_tail, sizeof(buftail))) 1409 + if (!access_ok(user_frame_tail, sizeof(buftail))) 1411 1410 return; 1412 1411 1413 1412 if (__copy_from_user_inatomic ··· 1423 1424 (unsigned long *)(fp - (unsigned long) 1424 1425 sizeof(buftail_opt_size)); 1425 1426 1426 - if (!access_ok(VERIFY_READ, user_frame_tail, 1427 + if (!access_ok(user_frame_tail, 1427 1428 sizeof(buftail_opt_size))) 1428 1429 return; 1429 1430
+2 -2
arch/nds32/kernel/signal.c
··· 151 151 152 152 frame = (struct rt_sigframe __user *)regs->sp; 153 153 154 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 154 + if (!access_ok(frame, sizeof(*frame))) 155 155 goto badframe; 156 156 157 157 if (restore_sigframe(regs, frame)) ··· 275 275 get_sigframe(ksig, regs, sizeof(*frame)); 276 276 int err = 0; 277 277 278 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 278 + if (!access_ok(frame, sizeof(*frame))) 279 279 return -EFAULT; 280 280 281 281 __put_user_error(0, &frame->uc.uc_flags, err);
+4 -4
arch/nds32/mm/alignment.c
··· 289 289 unaligned_addr += shift; 290 290 291 291 if (load) { 292 - if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) 292 + if (!access_ok((void *)unaligned_addr, len)) 293 293 return -EACCES; 294 294 295 295 get_data(unaligned_addr, &target_val, len); 296 296 *idx_to_addr(regs, target_idx) = target_val; 297 297 } else { 298 - if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) 298 + if (!access_ok((void *)unaligned_addr, len)) 299 299 return -EACCES; 300 300 target_val = *idx_to_addr(regs, target_idx); 301 301 set_data((void *)unaligned_addr, target_val, len); ··· 479 479 480 480 if (load) { 481 481 482 - if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) 482 + if (!access_ok((void *)unaligned_addr, len)) 483 483 return -EACCES; 484 484 485 485 get_data(unaligned_addr, &target_val, len); ··· 491 491 *idx_to_addr(regs, RT(inst)) = target_val; 492 492 } else { 493 493 494 - if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) 494 + if (!access_ok((void *)unaligned_addr, len)) 495 495 return -EACCES; 496 496 497 497 target_val = *idx_to_addr(regs, RT(inst));
+4 -4
arch/nios2/include/asm/uaccess.h
··· 37 37 (((signed long)(((long)get_fs().seg) & \ 38 38 ((long)(addr) | (((long)(addr)) + (len)) | (len)))) == 0) 39 39 40 - #define access_ok(type, addr, len) \ 40 + #define access_ok(addr, len) \ 41 41 likely(__access_ok((unsigned long)(addr), (unsigned long)(len))) 42 42 43 43 # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" ··· 70 70 static inline unsigned long __must_check clear_user(void __user *to, 71 71 unsigned long n) 72 72 { 73 - if (!access_ok(VERIFY_WRITE, to, n)) 73 + if (!access_ok(to, n)) 74 74 return n; 75 75 return __clear_user(to, n); 76 76 } ··· 142 142 long __gu_err = -EFAULT; \ 143 143 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 144 144 unsigned long __gu_val = 0; \ 145 - if (access_ok(VERIFY_READ, __gu_ptr, sizeof(*__gu_ptr))) \ 145 + if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \ 146 146 __get_user_common(__gu_val, sizeof(*__gu_ptr), \ 147 147 __gu_ptr, __gu_err); \ 148 148 (x) = (__force __typeof__(x))__gu_val; \ ··· 168 168 long __pu_err = -EFAULT; \ 169 169 __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ 170 170 __typeof__(*(ptr)) __pu_val = (__typeof(*ptr))(x); \ 171 - if (access_ok(VERIFY_WRITE, __pu_ptr, sizeof(*__pu_ptr))) { \ 171 + if (access_ok(__pu_ptr, sizeof(*__pu_ptr))) { \ 172 172 switch (sizeof(*__pu_ptr)) { \ 173 173 case 1: \ 174 174 __put_user_asm(__pu_val, "stb", __pu_ptr, __pu_err); \
+1 -1
arch/nios2/kernel/signal.c
··· 106 106 sigset_t set; 107 107 int rval; 108 108 109 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 109 + if (!access_ok(frame, sizeof(*frame))) 110 110 goto badframe; 111 111 112 112 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+1 -1
arch/openrisc/include/asm/futex.h
··· 72 72 int ret = 0; 73 73 u32 prev; 74 74 75 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 75 + if (!access_ok(uaddr, sizeof(u32))) 76 76 return -EFAULT; 77 77 78 78 __asm__ __volatile__ ( \
+4 -4
arch/openrisc/include/asm/uaccess.h
··· 58 58 /* Ensure that addr is below task's addr_limit */ 59 59 #define __addr_ok(addr) ((unsigned long) addr < get_fs()) 60 60 61 - #define access_ok(type, addr, size) \ 61 + #define access_ok(addr, size) \ 62 62 __range_ok((unsigned long)addr, (unsigned long)size) 63 63 64 64 /* ··· 102 102 ({ \ 103 103 long __pu_err = -EFAULT; \ 104 104 __typeof__(*(ptr)) *__pu_addr = (ptr); \ 105 - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ 105 + if (access_ok(__pu_addr, size)) \ 106 106 __put_user_size((x), __pu_addr, (size), __pu_err); \ 107 107 __pu_err; \ 108 108 }) ··· 175 175 ({ \ 176 176 long __gu_err = -EFAULT, __gu_val = 0; \ 177 177 const __typeof__(*(ptr)) * __gu_addr = (ptr); \ 178 - if (access_ok(VERIFY_READ, __gu_addr, size)) \ 178 + if (access_ok(__gu_addr, size)) \ 179 179 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 180 180 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 181 181 __gu_err; \ ··· 254 254 static inline __must_check unsigned long 255 255 clear_user(void *addr, unsigned long size) 256 256 { 257 - if (likely(access_ok(VERIFY_WRITE, addr, size))) 257 + if (likely(access_ok(addr, size))) 258 258 size = __clear_user(addr, size); 259 259 return size; 260 260 }
+3 -3
arch/openrisc/kernel/signal.c
··· 50 50 51 51 /* 52 52 * Restore the regs from &sc->regs. 53 - * (sc is already checked for VERIFY_READ since the sigframe was 53 + * (sc is already checked since the sigframe was 54 54 * checked in sys_sigreturn previously) 55 55 */ 56 56 err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); ··· 83 83 if (((long)frame) & 3) 84 84 goto badframe; 85 85 86 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 86 + if (!access_ok(frame, sizeof(*frame))) 87 87 goto badframe; 88 88 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 89 89 goto badframe; ··· 161 161 162 162 frame = get_sigframe(ksig, regs, sizeof(*frame)); 163 163 164 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 164 + if (!access_ok(frame, sizeof(*frame))) 165 165 return -EFAULT; 166 166 167 167 /* Create siginfo. */
+1 -1
arch/parisc/include/asm/futex.h
··· 95 95 if (uaccess_kernel() && !uaddr) 96 96 return -EFAULT; 97 97 98 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 98 + if (!access_ok(uaddr, sizeof(u32))) 99 99 return -EFAULT; 100 100 101 101 /* HPPA has no cmpxchg in hardware and therefore the
+1 -1
arch/parisc/include/asm/uaccess.h
··· 27 27 * that put_user is the same as __put_user, etc. 28 28 */ 29 29 30 - #define access_ok(type, uaddr, size) \ 30 + #define access_ok(uaddr, size) \ 31 31 ( (uaddr) == (uaddr) ) 32 32 33 33 #define put_user __put_user
+1 -1
arch/powerpc/include/asm/futex.h
··· 72 72 int ret = 0; 73 73 u32 prev; 74 74 75 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 75 + if (!access_ok(uaddr, sizeof(u32))) 76 76 return -EFAULT; 77 77 78 78 __asm__ __volatile__ (
+5 -5
arch/powerpc/include/asm/uaccess.h
··· 62 62 63 63 64 64 #endif 65 - #define access_ok(type, addr, size) \ 66 - (__chk_user_ptr(addr), (void)(type), \ 65 + #define access_ok(addr, size) \ 66 + (__chk_user_ptr(addr), \ 67 67 __access_ok((__force unsigned long)(addr), (size), get_fs())) 68 68 ··· 166 166 long __pu_err = -EFAULT; \ 167 167 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 168 168 might_fault(); \ 169 - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ 169 + if (access_ok(__pu_addr, size)) \ 170 170 __put_user_size((x), __pu_addr, (size), __pu_err); \ 171 171 __pu_err; \ 172 172 }) ··· 276 276 __long_type(*(ptr)) __gu_val = 0; \ 277 277 __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 278 278 might_fault(); \ 279 - if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ 279 + if (access_ok(__gu_addr, (size))) { \ 280 280 barrier_nospec(); \ 281 281 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 282 282 } \ ··· 374 374 static inline unsigned long clear_user(void __user *addr, unsigned long size) 375 375 { 376 376 might_fault(); 377 - if (likely(access_ok(VERIFY_WRITE, addr, size))) 377 + if (likely(access_ok(addr, size))) 378 378 return __clear_user(addr, size); 379 379 return size; 380 380 }
+1 -2
arch/powerpc/kernel/align.c
··· 131 131 132 132 /* Verify the address of the operand */ 133 133 if (unlikely(user_mode(regs) && 134 - !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), 135 - addr, nb))) 134 + !access_ok(addr, nb))) 136 135 return -EFAULT; 137 136 138 137 /* userland only */
+1 -1
arch/powerpc/kernel/rtas_flash.c
··· 523 523 args_buf->status = VALIDATE_INCOMPLETE; 524 524 } 525 525 526 - if (!access_ok(VERIFY_READ, buf, count)) { 526 + if (!access_ok(buf, count)) { 527 527 rc = -EFAULT; 528 528 goto done; 529 529 }
+1 -1
arch/powerpc/kernel/rtasd.c
··· 335 335 336 336 count = rtas_error_log_buffer_max; 337 337 338 - if (!access_ok(VERIFY_WRITE, buf, count)) 338 + if (!access_ok(buf, count)) 339 339 return -EFAULT; 340 340 341 341 tmp = kmalloc(count, GFP_KERNEL);
+1 -1
arch/powerpc/kernel/signal.c
··· 44 44 newsp = (oldsp - frame_size) & ~0xFUL; 45 45 46 46 /* Check access */ 47 - if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp)) 47 + if (!access_ok((void __user *)newsp, oldsp - newsp)) 48 48 return NULL; 49 49 50 50 return (void __user *)newsp;
+6 -6
arch/powerpc/kernel/signal_32.c
··· 1017 1017 #else 1018 1018 if (__get_user(mcp, &ucp->uc_regs)) 1019 1019 return -EFAULT; 1020 - if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) 1020 + if (!access_ok(mcp, sizeof(*mcp))) 1021 1021 return -EFAULT; 1022 1022 #endif 1023 1023 set_current_blocked(&set); ··· 1120 1120 */ 1121 1121 mctx = (struct mcontext __user *) 1122 1122 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); 1123 - if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) 1123 + if (!access_ok(old_ctx, ctx_size) 1124 1124 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region) 1125 1125 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked) 1126 1126 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) ··· 1128 1128 } 1129 1129 if (new_ctx == NULL) 1130 1130 return 0; 1131 - if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || 1131 + if (!access_ok(new_ctx, ctx_size) || 1132 1132 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) 1133 1133 return -EFAULT; 1134 1134 ··· 1169 1169 1170 1170 rt_sf = (struct rt_sigframe __user *) 1171 1171 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); 1172 - if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) 1172 + if (!access_ok(rt_sf, sizeof(*rt_sf))) 1173 1173 goto bad; 1174 1174 1175 1175 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM ··· 1315 1315 current->thread.debug.dbcr0 = new_dbcr0; 1316 1316 #endif 1317 1317 1318 - if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || 1318 + if (!access_ok(ctx, sizeof(*ctx)) || 1319 1319 fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx))) 1320 1320 return -EFAULT; 1321 1321 ··· 1500 1500 { 1501 1501 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); 1502 1502 addr = sr; 1503 - if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) 1503 + if (!access_ok(sr, sizeof(*sr)) 1504 1504 || restore_user_regs(regs, sr, 1)) 1505 1505 goto badframe; 1506 1506 }
+6 -7
arch/powerpc/kernel/signal_64.c
··· 383 383 err |= __get_user(v_regs, &sc->v_regs); 384 384 if (err) 385 385 return err; 386 - if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) 386 + if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) 387 387 return -EFAULT; 388 388 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 389 389 if (v_regs != NULL && (msr & MSR_VEC) != 0) { ··· 502 502 err |= __get_user(tm_v_regs, &tm_sc->v_regs); 503 503 if (err) 504 504 return err; 505 - if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) 505 + if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) 506 506 return -EFAULT; 507 - if (tm_v_regs && !access_ok(VERIFY_READ, 508 - tm_v_regs, 34 * sizeof(vector128))) 507 + if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128))) 509 508 return -EFAULT; 510 509 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 511 510 if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { ··· 670 671 ctx_has_vsx_region = 1; 671 672 672 673 if (old_ctx != NULL) { 673 - if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) 674 + if (!access_ok(old_ctx, ctx_size) 674 675 || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0, 675 676 ctx_has_vsx_region) 676 677 || __copy_to_user(&old_ctx->uc_sigmask, ··· 679 680 } 680 681 if (new_ctx == NULL) 681 682 return 0; 682 - if (!access_ok(VERIFY_READ, new_ctx, ctx_size) 683 + if (!access_ok(new_ctx, ctx_size) 683 684 || __get_user(tmp, (u8 __user *) new_ctx) 684 685 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) 685 686 return -EFAULT; ··· 724 725 /* Always make any pending restarted system calls return -EINTR */ 725 726 current->restart_block.fn = do_no_restart_syscall; 726 727 727 - if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) 728 + if (!access_ok(uc, sizeof(*uc))) 728 729 goto badframe; 729 730 730 731 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
+1 -1
arch/powerpc/kernel/syscalls.c
··· 89 89 if ( (unsigned long)n >= 4096 ) 90 90 { 91 91 unsigned long __user *buffer = (unsigned long __user *)n; 92 - if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long)) 92 + if (!access_ok(buffer, 5*sizeof(unsigned long)) 93 93 || __get_user(n, buffer) 94 94 || __get_user(inp, ((fd_set __user * __user *)(buffer+1))) 95 95 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
+1 -1
arch/powerpc/kernel/traps.c
··· 837 837 addr = (__force const void __user *)ea; 838 838 839 839 /* Check it */ 840 - if (!access_ok(VERIFY_READ, addr, 16)) { 840 + if (!access_ok(addr, 16)) { 841 841 pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" 842 842 " instr=%08x addr=%016lx\n", 843 843 smp_processor_id(), current->comm, current->pid,
+2 -2
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 1744 1744 int first_pass; 1745 1745 unsigned long hpte[2]; 1746 1746 1747 - if (!access_ok(VERIFY_WRITE, buf, count)) 1747 + if (!access_ok(buf, count)) 1748 1748 return -EFAULT; 1749 1749 if (kvm_is_radix(kvm)) 1750 1750 return 0; ··· 1844 1844 int mmu_ready; 1845 1845 int pshift; 1846 1846 1847 - if (!access_ok(VERIFY_READ, buf, count)) 1847 + if (!access_ok(buf, count)) 1848 1848 return -EFAULT; 1849 1849 if (kvm_is_radix(kvm)) 1850 1850 return -EINVAL;
+2 -2
arch/powerpc/lib/checksum_wrappers.c
··· 37 37 goto out; 38 38 } 39 39 40 - if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) { 40 + if (unlikely((len < 0) || !access_ok(src, len))) { 41 41 *err_ptr = -EFAULT; 42 42 csum = (__force unsigned int)sum; 43 43 goto out; ··· 78 78 goto out; 79 79 } 80 80 81 - if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) { 81 + if (unlikely((len < 0) || !access_ok(dst, len))) { 82 82 *err_ptr = -EFAULT; 83 83 csum = -1; /* invalid checksum */ 84 84 goto out;
+1 -1
arch/powerpc/mm/fault.c
··· 274 274 return false; 275 275 276 276 if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && 277 - access_ok(VERIFY_READ, nip, sizeof(*nip))) { 277 + access_ok(nip, sizeof(*nip))) { 278 278 unsigned int inst; 279 279 int res; 280 280
+1 -1
arch/powerpc/mm/subpage-prot.c
··· 214 214 return 0; 215 215 } 216 216 217 - if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32))) 217 + if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) 218 218 return -EFAULT; 219 219 220 220 down_write(&mm->mmap_sem);
+2 -2
arch/powerpc/oprofile/backtrace.c
··· 31 31 unsigned int stack_frame[2]; 32 32 void __user *p = compat_ptr(sp); 33 33 34 - if (!access_ok(VERIFY_READ, p, sizeof(stack_frame))) 34 + if (!access_ok(p, sizeof(stack_frame))) 35 35 return 0; 36 36 37 37 /* ··· 57 57 { 58 58 unsigned long stack_frame[3]; 59 59 60 - if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame))) 60 + if (!access_ok((void __user *)sp, sizeof(stack_frame))) 61 61 return 0; 62 62 63 63 if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
+8 -8
arch/powerpc/platforms/cell/spufs/file.c
··· 609 609 if (len < 4) 610 610 return -EINVAL; 611 611 612 - if (!access_ok(VERIFY_WRITE, buf, len)) 612 + if (!access_ok(buf, len)) 613 613 return -EFAULT; 614 614 615 615 udata = (void __user *)buf; ··· 717 717 if (len < 4) 718 718 return -EINVAL; 719 719 720 - if (!access_ok(VERIFY_WRITE, buf, len)) 720 + if (!access_ok(buf, len)) 721 721 return -EFAULT; 722 722 723 723 udata = (void __user *)buf; ··· 856 856 return -EINVAL; 857 857 858 858 udata = (void __user *)buf; 859 - if (!access_ok(VERIFY_READ, buf, len)) 859 + if (!access_ok(buf, len)) 860 860 return -EFAULT; 861 861 862 862 if (__get_user(wbox_data, udata)) ··· 1994 1994 int ret; 1995 1995 struct spu_context *ctx = file->private_data; 1996 1996 1997 - if (!access_ok(VERIFY_WRITE, buf, len)) 1997 + if (!access_ok(buf, len)) 1998 1998 return -EFAULT; 1999 1999 2000 2000 ret = spu_acquire_saved(ctx); ··· 2034 2034 struct spu_context *ctx = file->private_data; 2035 2035 int ret; 2036 2036 2037 - if (!access_ok(VERIFY_WRITE, buf, len)) 2037 + if (!access_ok(buf, len)) 2038 2038 return -EFAULT; 2039 2039 2040 2040 ret = spu_acquire_saved(ctx); ··· 2077 2077 struct spu_context *ctx = file->private_data; 2078 2078 int ret; 2079 2079 2080 - if (!access_ok(VERIFY_WRITE, buf, len)) 2080 + if (!access_ok(buf, len)) 2081 2081 return -EFAULT; 2082 2082 2083 2083 ret = spu_acquire_saved(ctx); ··· 2129 2129 struct spu_context *ctx = file->private_data; 2130 2130 int ret; 2131 2131 2132 - if (!access_ok(VERIFY_WRITE, buf, len)) 2132 + if (!access_ok(buf, len)) 2133 2133 return -EFAULT; 2134 2134 2135 2135 ret = spu_acquire_saved(ctx); ··· 2160 2160 if (len < ret) 2161 2161 return -EINVAL; 2162 2162 2163 - if (!access_ok(VERIFY_WRITE, buf, len)) 2163 + if (!access_ok(buf, len)) 2164 2164 return -EFAULT; 2165 2165 2166 2166 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+2 -2
arch/powerpc/platforms/powernv/opal-lpc.c
··· 192 192 u32 data, pos, len, todo; 193 193 int rc; 194 194 195 - if (!access_ok(VERIFY_WRITE, ubuf, count)) 195 + if (!access_ok(ubuf, count)) 196 196 return -EFAULT; 197 197 198 198 todo = count; ··· 283 283 u32 data, pos, len, todo; 284 284 int rc; 285 285 286 - if (!access_ok(VERIFY_READ, ubuf, count)) 286 + if (!access_ok(ubuf, count)) 287 287 return -EFAULT; 288 288 289 289 todo = count;
+1 -1
arch/powerpc/platforms/pseries/scanlog.c
··· 63 63 return -EINVAL; 64 64 } 65 65 66 - if (!access_ok(VERIFY_WRITE, buf, count)) 66 + if (!access_ok(buf, count)) 67 67 return -EFAULT; 68 68 69 69 for (;;) {
+1 -1
arch/riscv/include/asm/futex.h
··· 95 95 u32 val; 96 96 uintptr_t tmp; 97 97 98 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 98 + if (!access_ok(uaddr, sizeof(u32))) 99 99 return -EFAULT; 100 100 101 101 __enable_user_access();
+4 -10
arch/riscv/include/asm/uaccess.h
··· 54 54 #define user_addr_max() (get_fs()) 55 55 56 56 57 - #define VERIFY_READ 0 58 - #define VERIFY_WRITE 1 59 - 60 57 /** 61 58 * access_ok: - Checks if a user space pointer is valid 62 - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 63 - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 64 - * to write to a block, it is always safe to read from it. 65 59 * @addr: User space pointer to start of block to check 66 60 * @size: Size of block to check 67 61 * ··· 70 76 * checks that the pointer is in the user space range - after calling 71 77 * this function, memory access functions may still return -EFAULT. 72 78 */ 73 - #define access_ok(type, addr, size) ({ \ 79 + #define access_ok(addr, size) ({ \ 74 80 __chk_user_ptr(addr); \ 75 81 likely(__access_ok((unsigned long __force)(addr), (size))); \ 76 82 }) ··· 252 258 ({ \ 253 259 const __typeof__(*(ptr)) __user *__p = (ptr); \ 254 260 might_fault(); \ 255 - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ 261 + access_ok(__p, sizeof(*__p)) ? \ 256 262 __get_user((x), __p) : \ 257 263 ((x) = 0, -EFAULT); \ 258 264 }) ··· 380 386 ({ \ 381 387 __typeof__(*(ptr)) __user *__p = (ptr); \ 382 388 might_fault(); \ 383 - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ 389 + access_ok(__p, sizeof(*__p)) ? \ 384 390 __put_user((x), __p) : \ 385 391 -EFAULT; \ 386 392 }) ··· 415 421 unsigned long __must_check clear_user(void __user *to, unsigned long n) 416 422 { 417 423 might_fault(); 418 - return access_ok(VERIFY_WRITE, to, n) ? 424 + return access_ok(to, n) ? 419 425 __clear_user(to, n) : n; 420 426 } 421 427
+2 -2
arch/riscv/kernel/signal.c
··· 115 115 116 116 frame = (struct rt_sigframe __user *)regs->sp; 117 117 118 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 118 + if (!access_ok(frame, sizeof(*frame))) 119 119 goto badframe; 120 120 121 121 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ··· 187 187 long err = 0; 188 188 189 189 frame = get_sigframe(ksig, regs, sizeof(*frame)); 190 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 190 + if (!access_ok(frame, sizeof(*frame))) 191 191 return -EFAULT; 192 192 193 193 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+1 -1
arch/s390/include/asm/uaccess.h
··· 48 48 __range_ok((unsigned long)(addr), (size)); \ 49 49 }) 50 50 51 - #define access_ok(type, addr, size) __access_ok(addr, size) 51 + #define access_ok(addr, size) __access_ok(addr, size) 52 52 53 53 unsigned long __must_check 54 54 raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+1 -1
arch/sh/include/asm/checksum_32.h
··· 197 197 int len, __wsum sum, 198 198 int *err_ptr) 199 199 { 200 - if (access_ok(VERIFY_WRITE, dst, len)) 200 + if (access_ok(dst, len)) 201 201 return csum_partial_copy_generic((__force const void *)src, 202 202 dst, len, sum, NULL, err_ptr); 203 203
+1 -1
arch/sh/include/asm/futex.h
··· 22 22 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 23 23 u32 oldval, u32 newval) 24 24 { 25 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 25 + if (!access_ok(uaddr, sizeof(u32))) 26 26 return -EFAULT; 27 27 28 28 return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+4 -5
arch/sh/include/asm/uaccess.h
··· 18 18 */ 19 19 #define __access_ok(addr, size) \ 20 20 (__addr_ok((addr) + (size))) 21 - #define access_ok(type, addr, size) \ 21 + #define access_ok(addr, size) \ 22 22 (__chk_user_ptr(addr), \ 23 23 __access_ok((unsigned long __force)(addr), (size))) 24 24 ··· 66 66 long __gu_err = -EFAULT; \ 67 67 unsigned long __gu_val = 0; \ 68 68 const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 69 - if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ 69 + if (likely(access_ok(__gu_addr, (size)))) \ 70 70 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 71 71 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 72 72 __gu_err; \ ··· 87 87 long __pu_err = -EFAULT; \ 88 88 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 89 89 __typeof__(*(ptr)) __pu_val = x; \ 90 - if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \ 90 + if (likely(access_ok(__pu_addr, size))) \ 91 91 __put_user_size(__pu_val, __pu_addr, (size), \ 92 92 __pu_err); \ 93 93 __pu_err; \ ··· 132 132 void __user * __cl_addr = (addr); \ 133 133 unsigned long __cl_size = (n); \ 134 134 \ 135 - if (__cl_size && access_ok(VERIFY_WRITE, \ 136 - ((unsigned long)(__cl_addr)), __cl_size)) \ 135 + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ 137 136 __cl_size = __clear_user(__cl_addr, __cl_size); \ 138 137 \ 139 138 __cl_size; \
+4 -4
arch/sh/kernel/signal_32.c
··· 160 160 /* Always make any pending restarted system calls return -EINTR */ 161 161 current->restart_block.fn = do_no_restart_syscall; 162 162 163 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 163 + if (!access_ok(frame, sizeof(*frame))) 164 164 goto badframe; 165 165 166 166 if (__get_user(set.sig[0], &frame->sc.oldmask) ··· 190 190 /* Always make any pending restarted system calls return -EINTR */ 191 191 current->restart_block.fn = do_no_restart_syscall; 192 192 193 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 193 + if (!access_ok(frame, sizeof(*frame))) 194 194 goto badframe; 195 195 196 196 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ··· 272 272 273 273 frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); 274 274 275 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 275 + if (!access_ok(frame, sizeof(*frame))) 276 276 return -EFAULT; 277 277 278 278 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); ··· 338 338 339 339 frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); 340 340 341 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 341 + if (!access_ok(frame, sizeof(*frame))) 342 342 return -EFAULT; 343 343 344 344 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+4 -4
arch/sh/kernel/signal_64.c
··· 259 259 /* Always make any pending restarted system calls return -EINTR */ 260 260 current->restart_block.fn = do_no_restart_syscall; 261 261 262 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 262 + if (!access_ok(frame, sizeof(*frame))) 263 263 goto badframe; 264 264 265 265 if (__get_user(set.sig[0], &frame->sc.oldmask) ··· 293 293 /* Always make any pending restarted system calls return -EINTR */ 294 294 current->restart_block.fn = do_no_restart_syscall; 295 295 296 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 296 + if (!access_ok(frame, sizeof(*frame))) 297 297 goto badframe; 298 298 299 299 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ··· 379 379 380 380 frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); 381 381 382 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 382 + if (!access_ok(frame, sizeof(*frame))) 383 383 return -EFAULT; 384 384 385 385 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); ··· 465 465 466 466 frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); 467 467 468 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 468 + if (!access_ok(frame, sizeof(*frame))) 469 469 return -EFAULT; 470 470 471 471 err |= __put_user(&frame->info, &frame->pinfo);
+6 -6
arch/sh/kernel/traps_64.c
··· 40 40 /* SHmedia */ 41 41 aligned_pc = pc & ~3; 42 42 if (from_user_mode) { 43 - if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) { 43 + if (!access_ok(aligned_pc, sizeof(insn_size_t))) { 44 44 get_user_error = -EFAULT; 45 45 } else { 46 46 get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc); ··· 180 180 if (user_mode(regs)) { 181 181 __u64 buffer; 182 182 183 - if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { 183 + if (!access_ok((unsigned long) address, 1UL<<width_shift)) { 184 184 return -1; 185 185 } 186 186 ··· 254 254 if (user_mode(regs)) { 255 255 __u64 buffer; 256 256 257 - if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { 257 + if (!access_ok((unsigned long) address, 1UL<<width_shift)) { 258 258 return -1; 259 259 } 260 260 ··· 327 327 __u64 buffer; 328 328 __u32 buflo, bufhi; 329 329 330 - if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { 330 + if (!access_ok((unsigned long) address, 1UL<<width_shift)) { 331 331 return -1; 332 332 } 333 333 ··· 400 400 /* Initialise these to NaNs. */ 401 401 __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; 402 402 403 - if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { 403 + if (!access_ok((unsigned long) address, 1UL<<width_shift)) { 404 404 return -1; 405 405 } 406 406 ··· 663 663 /* SHmedia : check for defect. This requires executable vmas 664 664 to be readable too. */ 665 665 aligned_pc = pc & ~3; 666 - if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) 666 + if (!access_ok(aligned_pc, sizeof(insn_size_t))) 667 667 get_user_error = -EFAULT; 668 668 else 669 669 get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
+1 -2
arch/sh/mm/gup.c
··· 177 177 addr = start; 178 178 len = (unsigned long) nr_pages << PAGE_SHIFT; 179 179 end = start + len; 180 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, 181 - (void __user *)start, len))) 180 + if (unlikely(!access_ok((void __user *)start, len))) 182 181 return 0; 183 182 184 183 /*
+1 -1
arch/sh/oprofile/backtrace.c
··· 51 51 unsigned long buf_stack; 52 52 53 53 /* Also check accessibility of address */ 54 - if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long))) 54 + if (!access_ok(stackaddr, sizeof(unsigned long))) 55 55 return NULL; 56 56 57 57 if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long)))
+1 -1
arch/sparc/include/asm/checksum_32.h
··· 87 87 csum_partial_copy_to_user(const void *src, void __user *dst, int len, 88 88 __wsum sum, int *err) 89 89 { 90 - if (!access_ok (VERIFY_WRITE, dst, len)) { 90 + if (!access_ok(dst, len)) { 91 91 *err = -EFAULT; 92 92 return sum; 93 93 } else {
+2 -2
arch/sparc/include/asm/uaccess_32.h
··· 39 39 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) 40 40 #define __kernel_ok (uaccess_kernel()) 41 41 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) 42 - #define access_ok(type, addr, size) \ 42 + #define access_ok(addr, size) \ 43 - ({ (void)(type); __access_ok((unsigned long)(addr), size); }) 43 + ({ __access_ok((unsigned long)(addr), size); }) 44 44 
 45 45 /*
+1 -1
arch/sparc/include/asm/uaccess_64.h
··· 68 68 return 1; 69 69 } 70 70 71 - static inline int access_ok(int type, const void __user * addr, unsigned long size) 71 + static inline int access_ok(const void __user * addr, unsigned long size) 72 72 { 73 73 return 1; 74 74 }
+1 -1
arch/sparc/kernel/sigutil_32.c
··· 65 65 set_used_math(); 66 66 clear_tsk_thread_flag(current, TIF_USEDFPU); 67 67 68 - if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) 68 + if (!access_ok(fpu, sizeof(*fpu))) 69 69 return -EFAULT; 70 70 71 71 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
+3 -4
arch/sparc/kernel/unaligned_32.c
··· 278 278 enum direction dir) 279 279 { 280 280 unsigned int reg; 281 - int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE; 282 281 int size = ((insn >> 19) & 3) == 3 ? 8 : 4; 283 282 284 283 if ((regs->pc | regs->npc) & 3) ··· 289 290 290 291 reg = (insn >> 25) & 0x1f; 291 292 if (reg >= 16) { 292 - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) 293 + if (!access_ok(WINREG_ADDR(reg - 16), size)) 293 294 return -EFAULT; 294 295 } 295 296 reg = (insn >> 14) & 0x1f; 296 297 if (reg >= 16) { 297 - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) 298 + if (!access_ok(WINREG_ADDR(reg - 16), size)) 298 299 return -EFAULT; 299 300 } 300 301 if (!(insn & 0x2000)) { 301 302 reg = (insn & 0x1f); 302 303 if (reg >= 16) { 303 - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) 304 + if (!access_ok(WINREG_ADDR(reg - 16), size)) 304 305 return -EFAULT; 305 306 } 306 307 }
+2 -2
arch/um/kernel/ptrace.c
··· 66 66 67 67 #ifdef PTRACE_GETREGS 68 68 case PTRACE_GETREGS: { /* Get all gp regs from the child. */ 69 - if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) { 69 + if (!access_ok(p, MAX_REG_OFFSET)) { 70 70 ret = -EIO; 71 71 break; 72 72 } ··· 81 81 #ifdef PTRACE_SETREGS 82 82 case PTRACE_SETREGS: { /* Set all gp regs in the child. */ 83 83 unsigned long tmp = 0; 84 - if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) { 84 + if (!access_ok(p, MAX_REG_OFFSET)) { 85 85 ret = -EIO; 86 86 break; 87 87 }
+2 -2
arch/unicore32/kernel/signal.c
··· 117 117 118 118 frame = (struct rt_sigframe __user *)regs->UCreg_sp; 119 119 120 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 120 + if (!access_ok(frame, sizeof(*frame))) 121 121 goto badframe; 122 122 123 123 if (restore_sigframe(regs, &frame->sig)) ··· 205 205 /* 206 206 * Check that we can actually write to the signal frame. 207 207 */ 208 - if (!access_ok(VERIFY_WRITE, frame, framesize)) 208 + if (!access_ok(frame, framesize)) 209 209 frame = NULL; 210 210 211 211 return frame;
+1 -1
arch/x86/entry/vsyscall/vsyscall_64.c
··· 99 99 * sig_on_uaccess_err, this could go away. 100 100 */ 101 101 102 - if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) { 102 + if (!access_ok((void __user *)ptr, size)) { 103 103 struct thread_struct *thread = &current->thread; 104 104 105 105 thread->error_code = X86_PF_USER | X86_PF_WRITE;
+2 -2
arch/x86/ia32/ia32_aout.c
··· 176 176 177 177 /* make sure we actually have a data and stack area to dump */ 178 178 set_fs(USER_DS); 179 - if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump), 179 + if (!access_ok((void *) (unsigned long)START_DATA(dump), 180 180 dump.u_dsize << PAGE_SHIFT)) 181 181 dump.u_dsize = 0; 182 - if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump), 182 + if (!access_ok((void *) (unsigned long)START_STACK(dump), 183 183 dump.u_ssize << PAGE_SHIFT)) 184 184 dump.u_ssize = 0; 185 185
+4 -4
arch/x86/ia32/ia32_signal.c
··· 119 119 struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); 120 120 sigset_t set; 121 121 122 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 122 + if (!access_ok(frame, sizeof(*frame))) 123 123 goto badframe; 124 124 if (__get_user(set.sig[0], &frame->sc.oldmask) 125 125 || (_COMPAT_NSIG_WORDS > 1 ··· 147 147 148 148 frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); 149 149 150 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 150 + if (!access_ok(frame, sizeof(*frame))) 151 151 goto badframe; 152 152 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 153 153 goto badframe; ··· 269 269 270 270 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); 271 271 272 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 272 + if (!access_ok(frame, sizeof(*frame))) 273 273 return -EFAULT; 274 274 275 275 if (__put_user(sig, &frame->sig)) ··· 349 349 350 350 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); 351 351 352 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 352 + if (!access_ok(frame, sizeof(*frame))) 353 353 return -EFAULT; 354 354 355 355 put_user_try {
+1 -1
arch/x86/ia32/sys_ia32.c
··· 75 75 typeof(ubuf->st_gid) gid = 0; 76 76 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); 77 77 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); 78 - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || 78 + if (!access_ok(ubuf, sizeof(struct stat64)) || 79 79 __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) || 80 80 __put_user(stat->ino, &ubuf->__st_ino) || 81 81 __put_user(stat->ino, &ubuf->st_ino) ||
+1 -1
arch/x86/include/asm/checksum_32.h
··· 182 182 __wsum ret; 183 183 184 184 might_sleep(); 185 - if (access_ok(VERIFY_WRITE, dst, len)) { 185 + if (access_ok(dst, len)) { 186 186 stac(); 187 187 ret = csum_partial_copy_generic(src, (__force void *)dst, 188 188 len, sum, NULL, err_ptr);
+1 -1
arch/x86/include/asm/pgtable_32.h
··· 37 37 /* 38 38 * Define this if things work differently on an i386 and an i486: 39 39 * it will (on an i486) warn about kernel memory accesses that are 40 - * done without a 'access_ok(VERIFY_WRITE,..)' 40 + * done without a 'access_ok( ..)' 41 41 */ 42 42 #undef TEST_ACCESS_OK 43 43
+2 -5
arch/x86/include/asm/uaccess.h
··· 77 77 78 78 /** 79 79 * access_ok: - Checks if a user space pointer is valid 80 - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 81 - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 82 - * to write to a block, it is always safe to read from it. 83 80 * @addr: User space pointer to start of block to check 84 81 * @size: Size of block to check 85 82 * ··· 92 95 * checks that the pointer is in the user space range - after calling 93 96 * this function, memory access functions may still return -EFAULT. 94 97 */ 95 - #define access_ok(type, addr, size) \ 98 + #define access_ok(addr, size) \ 96 99 ({ \ 97 100 WARN_ON_IN_IRQ(); \ 98 101 likely(!__range_not_ok(addr, size, user_addr_max())); \ ··· 667 670 668 671 #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ 669 672 ({ \ 670 - access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 673 + access_ok((ptr), sizeof(*(ptr))) ? \ 671 674 __user_atomic_cmpxchg_inatomic((uval), (ptr), \ 672 675 (old), (new), sizeof(*(ptr))) : \ 673 676 -EFAULT; \
+2 -2
arch/x86/kernel/fpu/signal.c
··· 164 164 ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || 165 165 IS_ENABLED(CONFIG_IA32_EMULATION)); 166 166 167 - if (!access_ok(VERIFY_WRITE, buf, size)) 167 + if (!access_ok(buf, size)) 168 168 return -EACCES; 169 169 170 170 if (!static_cpu_has(X86_FEATURE_FPU)) ··· 281 281 return 0; 282 282 } 283 283 284 - if (!access_ok(VERIFY_READ, buf, size)) 284 + if (!access_ok(buf, size)) 285 285 return -EACCES; 286 286 287 287 fpu__initialize(fpu);
+7 -7
arch/x86/kernel/signal.c
··· 322 322 
 323 323 frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); 324 324 
 325 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 325 + if (!access_ok(frame, sizeof(*frame))) 326 326 return -EFAULT; 327 327 
 328 328 if (__put_user(sig, &frame->sig)) ··· 385 385 
 386 386 frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); 387 387 
 388 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 388 + if (!access_ok(frame, sizeof(*frame))) 389 389 return -EFAULT; 390 390 
 391 391 put_user_try { ··· 465 465 
 466 466 frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp); 467 467 
 468 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 468 + if (!access_ok(frame, sizeof(*frame))) 469 469 return -EFAULT; 470 470 
 471 471 if (ksig->ka.sa.sa_flags & SA_SIGINFO) { ··· 547 547 
 548 548 frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); 549 549 
 550 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 550 + if (!access_ok(frame, sizeof(*frame))) 551 551 return -EFAULT; 552 552 
 553 553 if (ksig->ka.sa.sa_flags & SA_SIGINFO) { ··· 610 610 
 611 611 frame = (struct sigframe __user *)(regs->sp - 8); 612 612 
 613 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 613 + if (!access_ok(frame, sizeof(*frame))) 614 614 goto badframe; 615 615 if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 616 616 && __copy_from_user(&set.sig[1], &frame->extramask, ··· 642 642 unsigned long uc_flags; 643 643 
 644 644 frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); 645 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 645 + if (!access_ok(frame, sizeof(*frame))) 646 646 goto badframe; 647 647 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 648 648 goto badframe; ··· 871 871 
 872 872 frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); 873 873 
 874 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 874 + if (!access_ok(frame, sizeof(*frame))) 875 875 goto badframe; 876 876 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 877 877 goto badframe;
+1 -1
arch/x86/kernel/stacktrace.c
··· 177 177 { 178 178 int ret; 179 179 180 - if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) 180 + if (!access_ok(fp, sizeof(*frame))) 181 181 return 0; 182 182 183 183 ret = 1;
+2 -2
arch/x86/kernel/vm86_32.c
··· 114 114 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); 115 115 user = vm86->user_vm86; 116 116 117 - if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ? 117 + if (!access_ok(user, vm86->vm86plus.is_vm86pus ? 118 118 sizeof(struct vm86plus_struct) : 119 119 sizeof(struct vm86_struct))) { 120 120 pr_alert("could not access userspace vm86 info\n"); ··· 278 278 if (vm86->saved_sp0) 279 279 return -EPERM; 280 280 281 - if (!access_ok(VERIFY_READ, user_vm86, plus ? 281 + if (!access_ok(user_vm86, plus ? 282 282 sizeof(struct vm86_struct) : 283 283 sizeof(struct vm86plus_struct))) 284 284 return -EFAULT;
+2 -2
arch/x86/lib/csum-wrappers_64.c
··· 27 27 might_sleep(); 28 28 *errp = 0; 29 29 30 - if (!likely(access_ok(VERIFY_READ, src, len))) 30 + if (!likely(access_ok(src, len))) 31 31 goto out_err; 32 32 33 33 /* ··· 89 89 90 90 might_sleep(); 91 91 92 - if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { 92 + if (unlikely(!access_ok(dst, len))) { 93 93 *errp = -EFAULT; 94 94 return 0; 95 95 }
+1 -1
arch/x86/lib/usercopy_32.c
··· 67 67 clear_user(void __user *to, unsigned long n) 68 68 { 69 69 might_fault(); 70 - if (access_ok(VERIFY_WRITE, to, n)) 70 + if (access_ok(to, n)) 71 71 __do_clear_user(to, n); 72 72 return n; 73 73 }
+1 -1
arch/x86/lib/usercopy_64.c
··· 48 48 49 49 unsigned long clear_user(void __user *to, unsigned long n) 50 50 { 51 - if (access_ok(VERIFY_WRITE, to, n)) 51 + if (access_ok(to, n)) 52 52 return __clear_user(to, n); 53 53 return n; 54 54 }
+2 -2
arch/x86/math-emu/fpu_system.h
··· 104 104 #define instruction_address (*(struct address *)&I387->soft.fip) 105 105 #define operand_address (*(struct address *)&I387->soft.foo) 106 106 107 - #define FPU_access_ok(x,y,z) if ( !access_ok(x,y,z) ) \ 107 + #define FPU_access_ok(y,z) if ( !access_ok(y,z) ) \ 108 108 math_abort(FPU_info,SIGSEGV) 109 109 #define FPU_abort math_abort(FPU_info, SIGSEGV) 110 110 ··· 119 119 /* A simpler test than access_ok() can probably be done for 120 120 FPU_code_access_ok() because the only possible error is to step 121 121 past the upper boundary of a legal code area. */ 122 - #define FPU_code_access_ok(z) FPU_access_ok(VERIFY_READ,(void __user *)FPU_EIP,z) 122 + #define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z) 123 123 #endif 124 124 125 125 #define FPU_get_user(x,y) get_user((x),(y))
+3 -3
arch/x86/math-emu/load_store.c
··· 251 251 break; 252 252 case 024: /* fldcw */ 253 253 RE_ENTRANT_CHECK_OFF; 254 - FPU_access_ok(VERIFY_READ, data_address, 2); 254 + FPU_access_ok(data_address, 2); 255 255 FPU_get_user(control_word, 256 256 (unsigned short __user *)data_address); 257 257 RE_ENTRANT_CHECK_ON; ··· 291 291 break; 292 292 case 034: /* fstcw m16int */ 293 293 RE_ENTRANT_CHECK_OFF; 294 - FPU_access_ok(VERIFY_WRITE, data_address, 2); 294 + FPU_access_ok(data_address, 2); 295 295 FPU_put_user(control_word, 296 296 (unsigned short __user *)data_address); 297 297 RE_ENTRANT_CHECK_ON; ··· 305 305 break; 306 306 case 036: /* fstsw m2byte */ 307 307 RE_ENTRANT_CHECK_OFF; 308 - FPU_access_ok(VERIFY_WRITE, data_address, 2); 308 + FPU_access_ok(data_address, 2); 309 309 FPU_put_user(status_word(), 310 310 (unsigned short __user *)data_address); 311 311 RE_ENTRANT_CHECK_ON;
+24 -24
arch/x86/math-emu/reg_ld_str.c
··· 84 84 FPU_REG *sti_ptr = &st(stnr); 85 85 86 86 RE_ENTRANT_CHECK_OFF; 87 - FPU_access_ok(VERIFY_READ, s, 10); 87 + FPU_access_ok(s, 10); 88 88 __copy_from_user(sti_ptr, s, 10); 89 89 RE_ENTRANT_CHECK_ON; 90 90 ··· 98 98 unsigned m64, l64; 99 99 100 100 RE_ENTRANT_CHECK_OFF; 101 - FPU_access_ok(VERIFY_READ, dfloat, 8); 101 + FPU_access_ok(dfloat, 8); 102 102 FPU_get_user(m64, 1 + (unsigned long __user *)dfloat); 103 103 FPU_get_user(l64, (unsigned long __user *)dfloat); 104 104 RE_ENTRANT_CHECK_ON; ··· 159 159 int exp, tag, negative; 160 160 161 161 RE_ENTRANT_CHECK_OFF; 162 - FPU_access_ok(VERIFY_READ, single, 4); 162 + FPU_access_ok(single, 4); 163 163 FPU_get_user(m32, (unsigned long __user *)single); 164 164 RE_ENTRANT_CHECK_ON; 165 165 ··· 214 214 FPU_REG *st0_ptr = &st(0); 215 215 216 216 RE_ENTRANT_CHECK_OFF; 217 - FPU_access_ok(VERIFY_READ, _s, 8); 217 + FPU_access_ok(_s, 8); 218 218 if (copy_from_user(&s, _s, 8)) 219 219 FPU_abort; 220 220 RE_ENTRANT_CHECK_ON; ··· 243 243 int negative; 244 244 245 245 RE_ENTRANT_CHECK_OFF; 246 - FPU_access_ok(VERIFY_READ, _s, 4); 246 + FPU_access_ok(_s, 4); 247 247 FPU_get_user(s, _s); 248 248 RE_ENTRANT_CHECK_ON; 249 249 ··· 271 271 int s, negative; 272 272 273 273 RE_ENTRANT_CHECK_OFF; 274 - FPU_access_ok(VERIFY_READ, _s, 2); 274 + FPU_access_ok(_s, 2); 275 275 /* Cast as short to get the sign extended. 
*/ 276 276 FPU_get_user(s, _s); 277 277 RE_ENTRANT_CHECK_ON; ··· 304 304 int sign; 305 305 306 306 RE_ENTRANT_CHECK_OFF; 307 - FPU_access_ok(VERIFY_READ, s, 10); 307 + FPU_access_ok(s, 10); 308 308 RE_ENTRANT_CHECK_ON; 309 309 for (pos = 8; pos >= 0; pos--) { 310 310 l *= 10; ··· 345 345 346 346 if (st0_tag != TAG_Empty) { 347 347 RE_ENTRANT_CHECK_OFF; 348 - FPU_access_ok(VERIFY_WRITE, d, 10); 348 + FPU_access_ok(d, 10); 349 349 350 350 FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d); 351 351 FPU_put_user(st0_ptr->sigh, ··· 364 364 /* The masked response */ 365 365 /* Put out the QNaN indefinite */ 366 366 RE_ENTRANT_CHECK_OFF; 367 - FPU_access_ok(VERIFY_WRITE, d, 10); 367 + FPU_access_ok(d, 10); 368 368 FPU_put_user(0, (unsigned long __user *)d); 369 369 FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d); 370 370 FPU_put_user(0xffff, 4 + (short __user *)d); ··· 539 539 /* The masked response */ 540 540 /* Put out the QNaN indefinite */ 541 541 RE_ENTRANT_CHECK_OFF; 542 - FPU_access_ok(VERIFY_WRITE, dfloat, 8); 542 + FPU_access_ok(dfloat, 8); 543 543 FPU_put_user(0, (unsigned long __user *)dfloat); 544 544 FPU_put_user(0xfff80000, 545 545 1 + (unsigned long __user *)dfloat); ··· 552 552 l[1] |= 0x80000000; 553 553 554 554 RE_ENTRANT_CHECK_OFF; 555 - FPU_access_ok(VERIFY_WRITE, dfloat, 8); 555 + FPU_access_ok(dfloat, 8); 556 556 FPU_put_user(l[0], (unsigned long __user *)dfloat); 557 557 FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat); 558 558 RE_ENTRANT_CHECK_ON; ··· 724 724 /* The masked response */ 725 725 /* Put out the QNaN indefinite */ 726 726 RE_ENTRANT_CHECK_OFF; 727 - FPU_access_ok(VERIFY_WRITE, single, 4); 727 + FPU_access_ok(single, 4); 728 728 FPU_put_user(0xffc00000, 729 729 (unsigned long __user *)single); 730 730 RE_ENTRANT_CHECK_ON; ··· 742 742 templ |= 0x80000000; 743 743 744 744 RE_ENTRANT_CHECK_OFF; 745 - FPU_access_ok(VERIFY_WRITE, single, 4); 745 + FPU_access_ok(single, 4); 746 746 FPU_put_user(templ, (unsigned long __user 
*)single); 747 747 RE_ENTRANT_CHECK_ON; 748 748 ··· 791 791 } 792 792 793 793 RE_ENTRANT_CHECK_OFF; 794 - FPU_access_ok(VERIFY_WRITE, d, 8); 794 + FPU_access_ok(d, 8); 795 795 if (copy_to_user(d, &tll, 8)) 796 796 FPU_abort; 797 797 RE_ENTRANT_CHECK_ON; ··· 838 838 } 839 839 840 840 RE_ENTRANT_CHECK_OFF; 841 - FPU_access_ok(VERIFY_WRITE, d, 4); 841 + FPU_access_ok(d, 4); 842 842 FPU_put_user(t.sigl, (unsigned long __user *)d); 843 843 RE_ENTRANT_CHECK_ON; 844 844 ··· 884 884 } 885 885 886 886 RE_ENTRANT_CHECK_OFF; 887 - FPU_access_ok(VERIFY_WRITE, d, 2); 887 + FPU_access_ok(d, 2); 888 888 FPU_put_user((short)t.sigl, d); 889 889 RE_ENTRANT_CHECK_ON; 890 890 ··· 925 925 if (control_word & CW_Invalid) { 926 926 /* Produce the QNaN "indefinite" */ 927 927 RE_ENTRANT_CHECK_OFF; 928 - FPU_access_ok(VERIFY_WRITE, d, 10); 928 + FPU_access_ok(d, 10); 929 929 for (i = 0; i < 7; i++) 930 930 FPU_put_user(0, d + i); /* These bytes "undefined" */ 931 931 FPU_put_user(0xc0, d + 7); /* This byte "undefined" */ ··· 941 941 } 942 942 943 943 RE_ENTRANT_CHECK_OFF; 944 - FPU_access_ok(VERIFY_WRITE, d, 10); 944 + FPU_access_ok(d, 10); 945 945 RE_ENTRANT_CHECK_ON; 946 946 for (i = 0; i < 9; i++) { 947 947 b = FPU_div_small(&ll, 10); ··· 1034 1034 ((addr_modes.default_mode == PM16) 1035 1035 ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) { 1036 1036 RE_ENTRANT_CHECK_OFF; 1037 - FPU_access_ok(VERIFY_READ, s, 0x0e); 1037 + FPU_access_ok(s, 0x0e); 1038 1038 FPU_get_user(control_word, (unsigned short __user *)s); 1039 1039 FPU_get_user(partial_status, (unsigned short __user *)(s + 2)); 1040 1040 FPU_get_user(tag_word, (unsigned short __user *)(s + 4)); ··· 1056 1056 } 1057 1057 } else { 1058 1058 RE_ENTRANT_CHECK_OFF; 1059 - FPU_access_ok(VERIFY_READ, s, 0x1c); 1059 + FPU_access_ok(s, 0x1c); 1060 1060 FPU_get_user(control_word, (unsigned short __user *)s); 1061 1061 FPU_get_user(partial_status, (unsigned short __user *)(s + 4)); 1062 1062 FPU_get_user(tag_word, (unsigned short 
__user *)(s + 8)); ··· 1125 1125 1126 1126 /* Copy all registers in stack order. */ 1127 1127 RE_ENTRANT_CHECK_OFF; 1128 - FPU_access_ok(VERIFY_READ, s, 80); 1128 + FPU_access_ok(s, 80); 1129 1129 __copy_from_user(register_base + offset, s, other); 1130 1130 if (offset) 1131 1131 __copy_from_user(register_base, s + other, offset); ··· 1146 1146 ((addr_modes.default_mode == PM16) 1147 1147 ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) { 1148 1148 RE_ENTRANT_CHECK_OFF; 1149 - FPU_access_ok(VERIFY_WRITE, d, 14); 1149 + FPU_access_ok(d, 14); 1150 1150 #ifdef PECULIAR_486 1151 1151 FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d); 1152 1152 #else ··· 1174 1174 d += 0x0e; 1175 1175 } else { 1176 1176 RE_ENTRANT_CHECK_OFF; 1177 - FPU_access_ok(VERIFY_WRITE, d, 7 * 4); 1177 + FPU_access_ok(d, 7 * 4); 1178 1178 #ifdef PECULIAR_486 1179 1179 control_word &= ~0xe080; 1180 1180 /* An 80486 sets nearly all of the reserved bits to 1. */ ··· 1204 1204 d = fstenv(addr_modes, data_address); 1205 1205 1206 1206 RE_ENTRANT_CHECK_OFF; 1207 - FPU_access_ok(VERIFY_WRITE, d, 80); 1207 + FPU_access_ok(d, 80); 1208 1208 1209 1209 /* Copy all registers in stack order. */ 1210 1210 if (__copy_to_user(d, register_base + offset, other))
+1 -1
arch/x86/mm/mpx.c
··· 495 495 unsigned long bd_entry; 496 496 unsigned long bt_addr; 497 497 498 - if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr))) 498 + if (!access_ok((bd_entry_ptr), sizeof(*bd_entry_ptr))) 499 499 return -EFAULT; 500 500 501 501 while (1) {
+1 -1
arch/x86/um/asm/checksum_32.h
··· 43 43 void __user *dst, 44 44 int len, __wsum sum, int *err_ptr) 45 45 { 46 - if (access_ok(VERIFY_WRITE, dst, len)) { 46 + if (access_ok(dst, len)) { 47 47 if (copy_to_user(dst, src, len)) { 48 48 *err_ptr = -EFAULT; 49 49 return (__force __wsum)-1;
+3 -3
arch/x86/um/signal.c
··· 367 367 /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ 368 368 stack_top = ((stack_top + 4) & -16UL) - 4; 369 369 frame = (struct sigframe __user *) stack_top - 1; 370 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 370 + if (!access_ok(frame, sizeof(*frame))) 371 371 return 1; 372 372 373 373 restorer = frame->retcode; ··· 412 412 413 413 stack_top &= -8UL; 414 414 frame = (struct rt_sigframe __user *) stack_top - 1; 415 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 415 + if (!access_ok(frame, sizeof(*frame))) 416 416 return 1; 417 417 418 418 restorer = frame->retcode; ··· 497 497 /* Subtract 128 for a red zone and 8 for proper alignment */ 498 498 frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); 499 499 500 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 500 + if (!access_ok(frame, sizeof(*frame))) 501 501 goto out; 502 502 503 503 if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+1 -1
arch/xtensa/include/asm/checksum.h
··· 243 243 void __user *dst, int len, 244 244 __wsum sum, int *err_ptr) 245 245 { 246 - if (access_ok(VERIFY_WRITE, dst, len)) 246 + if (access_ok(dst, len)) 247 247 return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr); 248 248 249 249 if (len)
+1 -1
arch/xtensa/include/asm/futex.h
··· 93 93 { 94 94 int ret = 0; 95 95 96 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 96 + if (!access_ok(uaddr, sizeof(u32))) 97 97 return -EFAULT; 98 98 99 99 #if !XCHAL_HAVE_S32C1I
+5 -5
arch/xtensa/include/asm/uaccess.h
··· 42 42 #define __user_ok(addr, size) \ 43 43 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) 44 44 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) 45 - #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) 45 + #define access_ok(addr, size) __access_ok((unsigned long)(addr), (size)) 46 46 47 47 #define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) 48 48 ··· 86 86 ({ \ 87 87 long __pu_err = -EFAULT; \ 88 88 __typeof__(*(ptr)) *__pu_addr = (ptr); \ 89 - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ 89 + if (access_ok(__pu_addr, size)) \ 90 90 __put_user_size((x), __pu_addr, (size), __pu_err); \ 91 91 __pu_err; \ 92 92 }) ··· 183 183 ({ \ 184 184 long __gu_err = -EFAULT, __gu_val = 0; \ 185 185 const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 186 - if (access_ok(VERIFY_READ, __gu_addr, size)) \ 186 + if (access_ok(__gu_addr, size)) \ 187 187 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 188 188 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 189 189 __gu_err; \ ··· 269 269 static inline unsigned long 270 270 clear_user(void *addr, unsigned long size) 271 271 { 272 - if (access_ok(VERIFY_WRITE, addr, size)) 272 + if (access_ok(addr, size)) 273 273 return __xtensa_clear_user(addr, size); 274 274 return size ? -EFAULT : 0; 275 275 } ··· 284 284 static inline long 285 285 strncpy_from_user(char *dst, const char *src, long count) 286 286 { 287 - if (access_ok(VERIFY_READ, src, 1)) 287 + if (access_ok(src, 1)) 288 288 return __strncpy_user(dst, src, count); 289 289 return -EFAULT; 290 290 }
+2 -2
arch/xtensa/kernel/signal.c
··· 251 251 252 252 frame = (struct rt_sigframe __user *) regs->areg[1]; 253 253 254 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 254 + if (!access_ok(frame, sizeof(*frame))) 255 255 goto badframe; 256 256 257 257 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) ··· 348 348 if (regs->depc > 64) 349 349 panic ("Double exception sys_sigreturn\n"); 350 350 351 - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { 351 + if (!access_ok(frame, sizeof(*frame))) { 352 352 return -EFAULT; 353 353 } 354 354
+1 -1
arch/xtensa/kernel/stacktrace.c
··· 91 91 pc = MAKE_PC_FROM_RA(a0, pc); 92 92 93 93 /* Check if the region is OK to access. */ 94 - if (!access_ok(VERIFY_READ, &SPILL_SLOT(a1, 0), 8)) 94 + if (!access_ok(&SPILL_SLOT(a1, 0), 8)) 95 95 return; 96 96 /* Copy a1, a0 from user space stack frame. */ 97 97 if (__get_user(a0, &SPILL_SLOT(a1, 0)) ||
+2 -2
drivers/acpi/acpi_dbg.c
··· 614 614 615 615 if (!count) 616 616 return 0; 617 - if (!access_ok(VERIFY_WRITE, buf, count)) 617 + if (!access_ok(buf, count)) 618 618 return -EFAULT; 619 619 620 620 while (count > 0) { ··· 684 684 685 685 if (!count) 686 686 return 0; 687 - if (!access_ok(VERIFY_READ, buf, count)) 687 + if (!access_ok(buf, count)) 688 688 return -EFAULT; 689 689 690 690 while (count > 0) {
+2 -2
drivers/char/generic_nvram.c
··· 44 44 unsigned int i; 45 45 char __user *p = buf; 46 46 47 - if (!access_ok(VERIFY_WRITE, buf, count)) 47 + if (!access_ok(buf, count)) 48 48 return -EFAULT; 49 49 if (*ppos >= nvram_len) 50 50 return 0; ··· 62 62 const char __user *p = buf; 63 63 char c; 64 64 65 - if (!access_ok(VERIFY_READ, buf, count)) 65 + if (!access_ok(buf, count)) 66 66 return -EFAULT; 67 67 if (*ppos >= nvram_len) 68 68 return 0;
+2 -2
drivers/char/mem.c
··· 609 609 unsigned long i = *ppos; 610 610 char __user *tmp = buf; 611 611 612 - if (!access_ok(VERIFY_WRITE, buf, count)) 612 + if (!access_ok(buf, count)) 613 613 return -EFAULT; 614 614 while (count-- > 0 && i < 65536) { 615 615 if (__put_user(inb(i), tmp) < 0) ··· 627 627 unsigned long i = *ppos; 628 628 const char __user *tmp = buf; 629 629 630 - if (!access_ok(VERIFY_READ, buf, count)) 630 + if (!access_ok(buf, count)) 631 631 return -EFAULT; 632 632 while (count-- > 0 && i < 65536) { 633 633 char c;
+1 -1
drivers/char/nwflash.c
··· 167 167 if (count > gbFlashSize - p) 168 168 count = gbFlashSize - p; 169 169 170 - if (!access_ok(VERIFY_READ, buf, count)) 170 + if (!access_ok(buf, count)) 171 171 return -EFAULT; 172 172 173 173 /*
+2 -2
drivers/char/pcmcia/cm4000_cs.c
··· 1445 1445 _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); 1446 1446 1447 1447 if (_IOC_DIR(cmd) & _IOC_READ) { 1448 - if (!access_ok(VERIFY_WRITE, argp, size)) 1448 + if (!access_ok(argp, size)) 1449 1449 goto out; 1450 1450 } 1451 1451 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1452 - if (!access_ok(VERIFY_READ, argp, size)) 1452 + if (!access_ok(argp, size)) 1453 1453 goto out; 1454 1454 } 1455 1455 rc = 0;
+3 -3
drivers/crypto/ccp/psp-dev.c
··· 364 364 goto cmd; 365 365 366 366 /* allocate a physically contiguous buffer to store the CSR blob */ 367 - if (!access_ok(VERIFY_WRITE, input.address, input.length) || 367 + if (!access_ok(input.address, input.length) || 368 368 input.length > SEV_FW_BLOB_MAX_SIZE) { 369 369 ret = -EFAULT; 370 370 goto e_free; ··· 644 644 645 645 /* Allocate a physically contiguous buffer to store the PDH blob. */ 646 646 if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) || 647 - !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) { 647 + !access_ok(input.pdh_cert_address, input.pdh_cert_len)) { 648 648 ret = -EFAULT; 649 649 goto e_free; 650 650 } 651 651 652 652 /* Allocate a physically contiguous buffer to store the cert chain blob. */ 653 653 if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || 654 - !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) { 654 + !access_ok(input.cert_chain_address, input.cert_chain_len)) { 655 655 ret = -EFAULT; 656 656 goto e_free; 657 657 }
+1 -1
drivers/firewire/core-cdev.c
··· 1094 1094 return -EINVAL; 1095 1095 1096 1096 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); 1097 - if (!access_ok(VERIFY_READ, p, a->size)) 1097 + if (!access_ok(p, a->size)) 1098 1098 return -EFAULT; 1099 1099 1100 1100 end = (void __user *)p + a->size;
+4 -4
drivers/firmware/efi/test/efi_test.c
··· 68 68 return 0; 69 69 } 70 70 71 - if (!access_ok(VERIFY_READ, src, 1)) 71 + if (!access_ok(src, 1)) 72 72 return -EFAULT; 73 73 74 74 buf = memdup_user(src, len); ··· 89 89 static inline int 90 90 get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len) 91 91 { 92 - if (!access_ok(VERIFY_READ, src, 1)) 92 + if (!access_ok(src, 1)) 93 93 return -EFAULT; 94 94 95 95 *len = user_ucs2_strsize(src); ··· 116 116 { 117 117 size_t len; 118 118 119 - if (!access_ok(VERIFY_READ, src, 1)) 119 + if (!access_ok(src, 1)) 120 120 return -EFAULT; 121 121 122 122 len = user_ucs2_strsize(src); ··· 140 140 if (!src) 141 141 return 0; 142 142 143 - if (!access_ok(VERIFY_WRITE, dst, 1)) 143 + if (!access_ok(dst, 1)) 144 144 return -EFAULT; 145 145 146 146 return copy_to_user(dst, src, len);
+1 -1
drivers/fpga/dfl-afu-dma-region.c
··· 369 369 if (user_addr + length < user_addr) 370 370 return -EINVAL; 371 371 372 - if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr, 372 + if (!access_ok((void __user *)(unsigned long)user_addr, 373 373 length)) 374 374 return -EINVAL; 375 375
+1 -2
drivers/fpga/dfl-fme-pr.c
··· 99 99 return -EINVAL; 100 100 } 101 101 102 - if (!access_ok(VERIFY_READ, 103 - (void __user *)(unsigned long)port_pr.buffer_address, 102 + if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address, 104 103 port_pr.buffer_size)) 105 104 return -EFAULT; 106 105
+6 -12
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 158 158 } 159 159 160 160 if ((args->ring_base_address) && 161 - (!access_ok(VERIFY_WRITE, 162 - (const void __user *) args->ring_base_address, 161 + (!access_ok((const void __user *) args->ring_base_address, 163 162 sizeof(uint64_t)))) { 164 163 pr_err("Can't access ring base address\n"); 165 164 return -EFAULT; ··· 169 170 return -EINVAL; 170 171 } 171 172 172 - if (!access_ok(VERIFY_WRITE, 173 - (const void __user *) args->read_pointer_address, 173 + if (!access_ok((const void __user *) args->read_pointer_address, 174 174 sizeof(uint32_t))) { 175 175 pr_err("Can't access read pointer\n"); 176 176 return -EFAULT; 177 177 } 178 178 179 - if (!access_ok(VERIFY_WRITE, 180 - (const void __user *) args->write_pointer_address, 179 + if (!access_ok((const void __user *) args->write_pointer_address, 181 180 sizeof(uint32_t))) { 182 181 pr_err("Can't access write pointer\n"); 183 182 return -EFAULT; 184 183 } 185 184 186 185 if (args->eop_buffer_address && 187 - !access_ok(VERIFY_WRITE, 188 - (const void __user *) args->eop_buffer_address, 186 + !access_ok((const void __user *) args->eop_buffer_address, 189 187 sizeof(uint32_t))) { 190 188 pr_debug("Can't access eop buffer"); 191 189 return -EFAULT; 192 190 } 193 191 194 192 if (args->ctx_save_restore_address && 195 - !access_ok(VERIFY_WRITE, 196 - (const void __user *) args->ctx_save_restore_address, 193 + !access_ok((const void __user *) args->ctx_save_restore_address, 197 194 sizeof(uint32_t))) { 198 195 pr_debug("Can't access ctx save restore buffer"); 199 196 return -EFAULT; ··· 360 365 } 361 366 362 367 if ((args->ring_base_address) && 363 - (!access_ok(VERIFY_WRITE, 364 - (const void __user *) args->ring_base_address, 368 + (!access_ok((const void __user *) args->ring_base_address, 365 369 sizeof(uint64_t)))) { 366 370 pr_err("Can't access ring base address\n"); 367 371 return -EFAULT;
+1 -1
drivers/gpu/drm/armada/armada_gem.c
··· 334 334 335 335 ptr = (char __user *)(uintptr_t)args->ptr; 336 336 337 - if (!access_ok(VERIFY_READ, ptr, args->size)) 337 + if (!access_ok(ptr, args->size)) 338 338 return -EFAULT; 339 339 340 340 ret = fault_in_pages_readable(ptr, args->size);
+1 -1
drivers/gpu/drm/drm_file.c
··· 525 525 struct drm_device *dev = file_priv->minor->dev; 526 526 ssize_t ret; 527 527 528 - if (!access_ok(VERIFY_WRITE, buffer, count)) 528 + if (!access_ok(buffer, count)) 529 529 return -EFAULT; 530 530 531 531 ret = mutex_lock_interruptible(&file_priv->event_read_lock);
+1 -7
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 339 339 struct drm_file *file) 340 340 { 341 341 struct drm_etnaviv_gem_userptr *args = data; 342 - int access; 343 342 344 343 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || 345 344 args->flags == 0) ··· 350 351 args->user_ptr & ~PAGE_MASK) 351 352 return -EINVAL; 352 353 353 - if (args->flags & ETNA_USERPTR_WRITE) 354 - access = VERIFY_WRITE; 355 - else 356 - access = VERIFY_READ; 357 - 358 - if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr, 354 + if (!access_ok((void __user *)(unsigned long)args->user_ptr, 359 355 args->user_size)) 360 356 return -EFAULT; 361 357
+2 -5
drivers/gpu/drm/i915/i915_gem.c
··· 1282 1282 if (args->size == 0) 1283 1283 return 0; 1284 1284 1285 - if (!access_ok(VERIFY_WRITE, 1286 - u64_to_user_ptr(args->data_ptr), 1285 + if (!access_ok(u64_to_user_ptr(args->data_ptr), 1287 1286 args->size)) 1288 1287 return -EFAULT; 1289 1288 ··· 1608 1609 if (args->size == 0) 1609 1610 return 0; 1610 1611 1611 - if (!access_ok(VERIFY_READ, 1612 - u64_to_user_ptr(args->data_ptr), 1613 - args->size)) 1612 + if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) 1614 1613 return -EFAULT; 1615 1614 1616 1615 obj = i915_gem_object_lookup(file, args->handle);
+3 -3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1447 1447 * to read. However, if the array is not writable the user loses 1448 1448 * the updated relocation values. 1449 1449 */ 1450 - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) 1450 + if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) 1451 1451 return -EFAULT; 1452 1452 1453 1453 do { ··· 1554 1554 1555 1555 addr = u64_to_user_ptr(entry->relocs_ptr); 1556 1556 size *= sizeof(struct drm_i915_gem_relocation_entry); 1557 - if (!access_ok(VERIFY_READ, addr, size)) 1557 + if (!access_ok(addr, size)) 1558 1558 return -EFAULT; 1559 1559 1560 1560 end = addr + size; ··· 2090 2090 return ERR_PTR(-EINVAL); 2091 2091 2092 2092 user = u64_to_user_ptr(args->cliprects_ptr); 2093 - if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user))) 2093 + if (!access_ok(user, nfences * sizeof(*user))) 2094 2094 return ERR_PTR(-EFAULT); 2095 2095 2096 2096 fences = kvmalloc_array(nfences, sizeof(*fences),
+1 -2
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 789 789 if (offset_in_page(args->user_ptr | args->user_size)) 790 790 return -EINVAL; 791 791 792 - if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE, 793 - (char __user *)(unsigned long)args->user_ptr, args->user_size)) 792 + if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) 794 793 return -EFAULT; 795 794 796 795 if (args->flags & I915_USERPTR_READ_ONLY) {
+1 -1
drivers/gpu/drm/i915/i915_ioc32.c
··· 52 52 return -EFAULT; 53 53 54 54 request = compat_alloc_user_space(sizeof(*request)); 55 - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || 55 + if (!access_ok(request, sizeof(*request)) || 56 56 __put_user(req32.param, &request->param) || 57 57 __put_user((void __user *)(unsigned long)req32.value, 58 58 &request->value))
+1 -1
drivers/gpu/drm/i915/i915_perf.c
··· 3052 3052 if (!n_regs) 3053 3053 return NULL; 3054 3054 3055 - if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2)) 3055 + if (!access_ok(regs, n_regs * sizeof(u32) * 2)) 3056 3056 return ERR_PTR(-EFAULT); 3057 3057 3058 3058 /* No is_valid function means we're not allowing any register to be programmed. */
+1 -1
drivers/gpu/drm/i915/i915_query.c
··· 46 46 if (topo.flags != 0) 47 47 return -EINVAL; 48 48 49 - if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr), 49 + if (!access_ok(u64_to_user_ptr(query_item->data_ptr), 50 50 total_length)) 51 51 return -EFAULT; 52 52
+1 -1
drivers/gpu/drm/msm/msm_gem_submit.c
··· 77 77 static inline unsigned long __must_check 78 78 copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) 79 79 { 80 - if (access_ok(VERIFY_READ, from, n)) 80 + if (access_ok(from, n)) 81 81 return __copy_from_user_inatomic(to, from, n); 82 82 return -EFAULT; 83 83 }
+1 -2
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 163 163 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 164 164 return -EINVAL; 165 165 166 - if (!access_ok(VERIFY_READ, 167 - u64_to_user_ptr(cmd->command), 166 + if (!access_ok(u64_to_user_ptr(cmd->command), 168 167 cmd->command_size)) 169 168 return -EFAULT; 170 169
+1 -2
drivers/infiniband/core/uverbs_main.c
··· 611 611 if (hdr->out_words * 8 < method_elm->resp_size) 612 612 return -ENOSPC; 613 613 614 - if (!access_ok(VERIFY_WRITE, 615 - u64_to_user_ptr(ex_hdr->response), 614 + if (!access_ok(u64_to_user_ptr(ex_hdr->response), 616 615 (hdr->out_words + ex_hdr->provider_out_words) * 8)) 617 616 return -EFAULT; 618 617 } else {
+1 -1
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 232 232 } 233 233 234 234 /* Verify that access is OK for the user buffer */ 235 - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 235 + if (!access_ok((void __user *)vaddr, 236 236 npages * PAGE_SIZE)) { 237 237 dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n", 238 238 (void *)vaddr, npages);
+1 -1
drivers/infiniband/hw/qib/qib_file_ops.c
··· 343 343 344 344 /* virtual address of first page in transfer */ 345 345 vaddr = ti->tidvaddr; 346 - if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, 346 + if (!access_ok((void __user *) vaddr, 347 347 cnt * PAGE_SIZE)) { 348 348 ret = -EFAULT; 349 349 goto done;
+1 -1
drivers/macintosh/ans-lcd.c
··· 64 64 printk(KERN_DEBUG "LCD: write\n"); 65 65 #endif 66 66 67 - if (!access_ok(VERIFY_READ, buf, count)) 67 + if (!access_ok(buf, count)) 68 68 return -EFAULT; 69 69 70 70 mutex_lock(&anslcd_mutex);
+1 -1
drivers/macintosh/via-pmu.c
··· 2188 2188 2189 2189 if (count < 1 || !pp) 2190 2190 return -EINVAL; 2191 - if (!access_ok(VERIFY_WRITE, buf, count)) 2191 + if (!access_ok(buf, count)) 2192 2192 return -EFAULT; 2193 2193 2194 2194 spin_lock_irqsave(&pp->lock, flags);
+1 -1
drivers/media/pci/ivtv/ivtvfb.c
··· 356 356 IVTVFB_WARN("ivtvfb_prep_frame: Count not a multiple of 4 (%d)\n", count); 357 357 358 358 /* Check Source */ 359 - if (!access_ok(VERIFY_READ, source + dest_offset, count)) { 359 + if (!access_ok(source + dest_offset, count)) { 360 360 IVTVFB_WARN("Invalid userspace pointer %p\n", source); 361 361 362 362 IVTVFB_DEBUG_WARN("access_ok() failed for offset 0x%08lx source %p count %d\n",
+23 -23
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
··· 158 158 compat_caddr_t p; 159 159 u32 clipcount; 160 160 161 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 161 + if (!access_ok(p32, sizeof(*p32)) || 162 162 copy_in_user(&p64->w, &p32->w, sizeof(p32->w)) || 163 163 assign_in_user(&p64->field, &p32->field) || 164 164 assign_in_user(&p64->chromakey, &p32->chromakey) || ··· 283 283 284 284 static int bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) 285 285 { 286 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) 286 + if (!access_ok(p32, sizeof(*p32))) 287 287 return -EFAULT; 288 288 return __bufsize_v4l2_format(p32, size); 289 289 } ··· 335 335 struct v4l2_format32 __user *p32, 336 336 void __user *aux_buf, u32 aux_space) 337 337 { 338 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) 338 + if (!access_ok(p32, sizeof(*p32))) 339 339 return -EFAULT; 340 340 return __get_v4l2_format32(p64, p32, aux_buf, aux_space); 341 341 } ··· 343 343 static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *p32, 344 344 u32 *size) 345 345 { 346 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) 346 + if (!access_ok(p32, sizeof(*p32))) 347 347 return -EFAULT; 348 348 return __bufsize_v4l2_format(&p32->format, size); 349 349 } ··· 352 352 struct v4l2_create_buffers32 __user *p32, 353 353 void __user *aux_buf, u32 aux_space) 354 354 { 355 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 355 + if (!access_ok(p32, sizeof(*p32)) || 356 356 copy_in_user(p64, p32, 357 357 offsetof(struct v4l2_create_buffers32, format))) 358 358 return -EFAULT; ··· 404 404 static int put_v4l2_format32(struct v4l2_format __user *p64, 405 405 struct v4l2_format32 __user *p32) 406 406 { 407 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32))) 407 + if (!access_ok(p32, sizeof(*p32))) 408 408 return -EFAULT; 409 409 return __put_v4l2_format32(p64, p32); 410 410 } ··· 412 412 static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, 413 413 struct v4l2_create_buffers32 __user *p32) 414 414 { 415 - if 
(!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 415 + if (!access_ok(p32, sizeof(*p32)) || 416 416 copy_in_user(p32, p64, 417 417 offsetof(struct v4l2_create_buffers32, format)) || 418 418 assign_in_user(&p32->capabilities, &p64->capabilities) || ··· 434 434 struct v4l2_standard32 __user *p32) 435 435 { 436 436 /* other fields are not set by the user, nor used by the driver */ 437 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 437 + if (!access_ok(p32, sizeof(*p32)) || 438 438 assign_in_user(&p64->index, &p32->index)) 439 439 return -EFAULT; 440 440 return 0; ··· 443 443 static int put_v4l2_standard32(struct v4l2_standard __user *p64, 444 444 struct v4l2_standard32 __user *p32) 445 445 { 446 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 446 + if (!access_ok(p32, sizeof(*p32)) || 447 447 assign_in_user(&p32->index, &p64->index) || 448 448 assign_in_user(&p32->id, &p64->id) || 449 449 copy_in_user(p32->name, p64->name, sizeof(p32->name)) || ··· 560 560 u32 type; 561 561 u32 length; 562 562 563 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 563 + if (!access_ok(p32, sizeof(*p32)) || 564 564 get_user(type, &p32->type) || 565 565 get_user(length, &p32->length)) 566 566 return -EFAULT; ··· 593 593 compat_caddr_t p; 594 594 int ret; 595 595 596 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 596 + if (!access_ok(p32, sizeof(*p32)) || 597 597 assign_in_user(&p64->index, &p32->index) || 598 598 get_user(type, &p32->type) || 599 599 put_user(type, &p64->type) || ··· 632 632 return -EFAULT; 633 633 634 634 uplane32 = compat_ptr(p); 635 - if (!access_ok(VERIFY_READ, uplane32, 635 + if (!access_ok(uplane32, 636 636 num_planes * sizeof(*uplane32))) 637 637 return -EFAULT; 638 638 ··· 691 691 compat_caddr_t p; 692 692 int ret; 693 693 694 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 694 + if (!access_ok(p32, sizeof(*p32)) || 695 695 assign_in_user(&p32->index, &p64->index) || 696 696 get_user(type, &p64->type) || 697 697 put_user(type, &p32->type) || 
··· 781 781 { 782 782 compat_caddr_t tmp; 783 783 784 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 784 + if (!access_ok(p32, sizeof(*p32)) || 785 785 get_user(tmp, &p32->base) || 786 786 put_user_force(compat_ptr(tmp), &p64->base) || 787 787 assign_in_user(&p64->capability, &p32->capability) || ··· 796 796 { 797 797 void *base; 798 798 799 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 799 + if (!access_ok(p32, sizeof(*p32)) || 800 800 get_user(base, &p64->base) || 801 801 put_user(ptr_to_compat((void __user *)base), &p32->base) || 802 802 assign_in_user(&p32->capability, &p64->capability) || ··· 893 893 { 894 894 u32 count; 895 895 896 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 896 + if (!access_ok(p32, sizeof(*p32)) || 897 897 get_user(count, &p32->count)) 898 898 return -EFAULT; 899 899 if (count > V4L2_CID_MAX_CTRLS) ··· 913 913 u32 n; 914 914 compat_caddr_t p; 915 915 916 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 916 + if (!access_ok(p32, sizeof(*p32)) || 917 917 assign_in_user(&p64->which, &p32->which) || 918 918 get_user(count, &p32->count) || 919 919 put_user(count, &p64->count) || ··· 929 929 if (get_user(p, &p32->controls)) 930 930 return -EFAULT; 931 931 ucontrols = compat_ptr(p); 932 - if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) 932 + if (!access_ok(ucontrols, count * sizeof(*ucontrols))) 933 933 return -EFAULT; 934 934 if (aux_space < count * sizeof(*kcontrols)) 935 935 return -EFAULT; ··· 979 979 * with __user causes smatch warnings, so instead declare it 980 980 * without __user and cast it as a userspace pointer where needed. 
981 981 */ 982 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 982 + if (!access_ok(p32, sizeof(*p32)) || 983 983 assign_in_user(&p32->which, &p64->which) || 984 984 get_user(count, &p64->count) || 985 985 put_user(count, &p32->count) || ··· 994 994 if (get_user(p, &p32->controls)) 995 995 return -EFAULT; 996 996 ucontrols = compat_ptr(p); 997 - if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) 997 + if (!access_ok(ucontrols, count * sizeof(*ucontrols))) 998 998 return -EFAULT; 999 999 1000 1000 for (n = 0; n < count; n++) { ··· 1043 1043 static int put_v4l2_event32(struct v4l2_event __user *p64, 1044 1044 struct v4l2_event32 __user *p32) 1045 1045 { 1046 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 1046 + if (!access_ok(p32, sizeof(*p32)) || 1047 1047 assign_in_user(&p32->type, &p64->type) || 1048 1048 copy_in_user(&p32->u, &p64->u, sizeof(p64->u)) || 1049 1049 assign_in_user(&p32->pending, &p64->pending) || ··· 1069 1069 { 1070 1070 compat_uptr_t tmp; 1071 1071 1072 - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || 1072 + if (!access_ok(p32, sizeof(*p32)) || 1073 1073 assign_in_user(&p64->pad, &p32->pad) || 1074 1074 assign_in_user(&p64->start_block, &p32->start_block) || 1075 1075 assign_in_user_cast(&p64->blocks, &p32->blocks) || ··· 1085 1085 { 1086 1086 void *edid; 1087 1087 1088 - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 1088 + if (!access_ok(p32, sizeof(*p32)) || 1089 1089 assign_in_user(&p32->pad, &p64->pad) || 1090 1090 assign_in_user(&p32->start_block, &p64->start_block) || 1091 1091 assign_in_user(&p32->blocks, &p64->blocks) ||
+1 -1
drivers/misc/vmw_vmci/vmci_host.c
··· 236 236 * about the size. 237 237 */ 238 238 BUILD_BUG_ON(sizeof(bool) != sizeof(u8)); 239 - if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8))) 239 + if (!access_ok((void __user *)uva, sizeof(u8))) 240 240 return VMCI_ERROR_GENERIC; 241 241 242 242 /*
+2 -2
drivers/pci/proc.c
··· 52 52 nbytes = size - pos; 53 53 cnt = nbytes; 54 54 55 - if (!access_ok(VERIFY_WRITE, buf, cnt)) 55 + if (!access_ok(buf, cnt)) 56 56 return -EINVAL; 57 57 58 58 pci_config_pm_runtime_get(dev); ··· 125 125 nbytes = size - pos; 126 126 cnt = nbytes; 127 127 128 - if (!access_ok(VERIFY_READ, buf, cnt)) 128 + if (!access_ok(buf, cnt)) 129 129 return -EINVAL; 130 130 131 131 pci_config_pm_runtime_get(dev);
+1 -2
drivers/platform/goldfish/goldfish_pipe.c
··· 416 416 if (unlikely(bufflen == 0)) 417 417 return 0; 418 418 /* Check the buffer range for access */ 419 - if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ, 420 - buffer, bufflen))) 419 + if (unlikely(!access_ok(buffer, bufflen))) 421 420 return -EFAULT; 422 421 423 422 address = (unsigned long)buffer;
+1 -1
drivers/pnp/isapnp/proc.c
··· 47 47 nbytes = size - pos; 48 48 cnt = nbytes; 49 49 50 - if (!access_ok(VERIFY_WRITE, buf, cnt)) 50 + if (!access_ok(buf, cnt)) 51 51 return -EINVAL; 52 52 53 53 isapnp_cfg_begin(dev->card->number, dev->number);
+1 -3
drivers/scsi/pmcraid.c
··· 3600 3600 u32 ioasc; 3601 3601 int request_size; 3602 3602 int buffer_size; 3603 - u8 access, direction; 3603 + u8 direction; 3604 3604 int rc = 0; 3605 3605 3606 3606 /* If IOA reset is in progress, wait 10 secs for reset to complete */ ··· 3649 3649 request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); 3650 3650 3651 3651 if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { 3652 - access = VERIFY_READ; 3653 3652 direction = DMA_TO_DEVICE; 3654 3653 } else { 3655 - access = VERIFY_WRITE; 3656 3654 direction = DMA_FROM_DEVICE; 3657 3655 } 3658 3656
+1 -1
drivers/scsi/scsi_ioctl.c
··· 221 221 222 222 switch (cmd) { 223 223 case SCSI_IOCTL_GET_IDLUN: 224 - if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun))) 224 + if (!access_ok(arg, sizeof(struct scsi_idlun))) 225 225 return -EFAULT; 226 226 227 227 __put_user((sdev->id & 0xff)
+8 -8
drivers/scsi/sg.c
··· 434 434 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, 435 435 "sg_read: count=%d\n", (int) count)); 436 436 437 - if (!access_ok(VERIFY_WRITE, buf, count)) 437 + if (!access_ok(buf, count)) 438 438 return -EFAULT; 439 439 if (sfp->force_packid && (count >= SZ_SG_HEADER)) { 440 440 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); ··· 632 632 scsi_block_when_processing_errors(sdp->device))) 633 633 return -ENXIO; 634 634 635 - if (!access_ok(VERIFY_READ, buf, count)) 635 + if (!access_ok(buf, count)) 636 636 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 637 637 if (count < SZ_SG_HEADER) 638 638 return -EIO; ··· 729 729 730 730 if (count < SZ_SG_IO_HDR) 731 731 return -EINVAL; 732 - if (!access_ok(VERIFY_READ, buf, count)) 732 + if (!access_ok(buf, count)) 733 733 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 734 734 735 735 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ ··· 768 768 sg_remove_request(sfp, srp); 769 769 return -EMSGSIZE; 770 770 } 771 - if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { 771 + if (!access_ok(hp->cmdp, hp->cmd_len)) { 772 772 sg_remove_request(sfp, srp); 773 773 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 774 774 } ··· 922 922 return -ENODEV; 923 923 if (!scsi_block_when_processing_errors(sdp->device)) 924 924 return -ENXIO; 925 - if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) 925 + if (!access_ok(p, SZ_SG_IO_HDR)) 926 926 return -EFAULT; 927 927 result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 928 928 1, read_only, 1, &srp); ··· 968 968 case SG_GET_LOW_DMA: 969 969 return put_user((int) sdp->device->host->unchecked_isa_dma, ip); 970 970 case SG_GET_SCSI_ID: 971 - if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) 971 + if (!access_ok(p, sizeof (sg_scsi_id_t))) 972 972 return -EFAULT; 973 973 else { 974 974 sg_scsi_id_t __user *sg_idp = p; ··· 997 997 sfp->force_packid = val ? 
1 : 0; 998 998 return 0; 999 999 case SG_GET_PACK_ID: 1000 - if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) 1000 + if (!access_ok(ip, sizeof (int))) 1001 1001 return -EFAULT; 1002 1002 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1003 1003 list_for_each_entry(srp, &sfp->rq_list, entry) { ··· 1078 1078 val = (sdp->device ? 1 : 0); 1079 1079 return put_user(val, ip); 1080 1080 case SG_GET_REQUEST_TABLE: 1081 - if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) 1081 + if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) 1082 1082 return -EFAULT; 1083 1083 else { 1084 1084 sg_req_info_t *rinfo;
+12 -12
drivers/staging/comedi/comedi_compat32.c
··· 102 102 chaninfo = compat_alloc_user_space(sizeof(*chaninfo)); 103 103 104 104 /* Copy chaninfo structure. Ignore unused members. */ 105 - if (!access_ok(VERIFY_READ, chaninfo32, sizeof(*chaninfo32)) || 106 - !access_ok(VERIFY_WRITE, chaninfo, sizeof(*chaninfo))) 105 + if (!access_ok(chaninfo32, sizeof(*chaninfo32)) || 106 + !access_ok(chaninfo, sizeof(*chaninfo))) 107 107 return -EFAULT; 108 108 109 109 err = 0; ··· 136 136 rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo)); 137 137 138 138 /* Copy rangeinfo structure. */ 139 - if (!access_ok(VERIFY_READ, rangeinfo32, sizeof(*rangeinfo32)) || 140 - !access_ok(VERIFY_WRITE, rangeinfo, sizeof(*rangeinfo))) 139 + if (!access_ok(rangeinfo32, sizeof(*rangeinfo32)) || 140 + !access_ok(rangeinfo, sizeof(*rangeinfo))) 141 141 return -EFAULT; 142 142 143 143 err = 0; ··· 163 163 } temp; 164 164 165 165 /* Copy cmd structure. */ 166 - if (!access_ok(VERIFY_READ, cmd32, sizeof(*cmd32)) || 167 - !access_ok(VERIFY_WRITE, cmd, sizeof(*cmd))) 166 + if (!access_ok(cmd32, sizeof(*cmd32)) || 167 + !access_ok(cmd, sizeof(*cmd))) 168 168 return -EFAULT; 169 169 170 170 err = 0; ··· 217 217 * Assume the pointer values are already valid. 218 218 * (Could use ptr_to_compat() to set them.) 219 219 */ 220 - if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd)) || 221 - !access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32))) 220 + if (!access_ok(cmd, sizeof(*cmd)) || 221 + !access_ok(cmd32, sizeof(*cmd32))) 222 222 return -EFAULT; 223 223 224 224 err = 0; ··· 317 317 318 318 /* Copy insn structure. Ignore the unused members. */ 319 319 err = 0; 320 - if (!access_ok(VERIFY_READ, insn32, sizeof(*insn32)) || 321 - !access_ok(VERIFY_WRITE, insn, sizeof(*insn))) 320 + if (!access_ok(insn32, sizeof(*insn32)) || 321 + !access_ok(insn, sizeof(*insn))) 322 322 return -EFAULT; 323 323 324 324 err |= __get_user(temp.uint, &insn32->insn); ··· 350 350 insnlist32 = compat_ptr(arg); 351 351 352 352 /* Get 32-bit insnlist structure. 
*/ 353 - if (!access_ok(VERIFY_READ, insnlist32, sizeof(*insnlist32))) 353 + if (!access_ok(insnlist32, sizeof(*insnlist32))) 354 354 return -EFAULT; 355 355 356 356 err = 0; ··· 365 365 insn[n_insns])); 366 366 367 367 /* Set native insnlist structure. */ 368 - if (!access_ok(VERIFY_WRITE, &s->insnlist, sizeof(s->insnlist))) 368 + if (!access_ok(&s->insnlist, sizeof(s->insnlist))) 369 369 return -EFAULT; 370 370 371 371 err |= __put_user(n_insns, &s->insnlist.n_insns);
+1 -1
drivers/tty/n_hdlc.c
··· 573 573 return -EIO; 574 574 575 575 /* verify user access to buffer */ 576 - if (!access_ok(VERIFY_WRITE, buf, nr)) { 576 + if (!access_ok(buf, nr)) { 577 577 printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user " 578 578 "buffer\n", __FILE__, __LINE__); 579 579 return -EFAULT;
+1 -1
drivers/usb/core/devices.c
··· 598 598 return -EINVAL; 599 599 if (nbytes <= 0) 600 600 return 0; 601 - if (!access_ok(VERIFY_WRITE, buf, nbytes)) 601 + if (!access_ok(buf, nbytes)) 602 602 return -EFAULT; 603 603 604 604 mutex_lock(&usb_bus_idr_lock);
+3 -4
drivers/usb/core/devio.c
··· 1094 1094 ctrl.bRequestType, ctrl.bRequest, ctrl.wValue, 1095 1095 ctrl.wIndex, ctrl.wLength); 1096 1096 if (ctrl.bRequestType & 0x80) { 1097 - if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, 1097 + if (ctrl.wLength && !access_ok(ctrl.data, 1098 1098 ctrl.wLength)) { 1099 1099 ret = -EINVAL; 1100 1100 goto done; ··· 1183 1183 } 1184 1184 tmo = bulk.timeout; 1185 1185 if (bulk.ep & 0x80) { 1186 - if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) { 1186 + if (len1 && !access_ok(bulk.data, len1)) { 1187 1187 ret = -EINVAL; 1188 1188 goto done; 1189 1189 } ··· 1584 1584 } 1585 1585 1586 1586 if (uurb->buffer_length > 0 && 1587 - !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, 1588 - uurb->buffer, uurb->buffer_length)) { 1587 + !access_ok(uurb->buffer, uurb->buffer_length)) { 1589 1588 ret = -EFAULT; 1590 1589 goto error; 1591 1590 }
+2 -2
drivers/usb/gadget/function/f_hid.c
··· 252 252 if (!count) 253 253 return 0; 254 254 255 - if (!access_ok(VERIFY_WRITE, buffer, count)) 255 + if (!access_ok(buffer, count)) 256 256 return -EFAULT; 257 257 258 258 spin_lock_irqsave(&hidg->read_spinlock, flags); ··· 339 339 unsigned long flags; 340 340 ssize_t status = -ENOMEM; 341 341 342 - if (!access_ok(VERIFY_READ, buffer, count)) 342 + if (!access_ok(buffer, count)) 343 343 return -EFAULT; 344 344 345 345 spin_lock_irqsave(&hidg->write_spinlock, flags);
+1 -1
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 88 88 size_t len, remaining, actual = 0; 89 89 char tmpbuf[38]; 90 90 91 - if (!access_ok(VERIFY_WRITE, buf, nbytes)) 91 + if (!access_ok(buf, nbytes)) 92 92 return -EFAULT; 93 93 94 94 inode_lock(file_inode(file));
+8 -8
drivers/vhost/vhost.c
··· 655 655 a + (unsigned long)log_base > ULONG_MAX) 656 656 return false; 657 657 658 - return access_ok(VERIFY_WRITE, log_base + a, 658 + return access_ok(log_base + a, 659 659 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); 660 660 } 661 661 ··· 681 681 return false; 682 682 683 683 684 - if (!access_ok(VERIFY_WRITE, (void __user *)a, 684 + if (!access_ok((void __user *)a, 685 685 node->size)) 686 686 return false; 687 687 else if (log_all && !log_access_ok(log_base, ··· 973 973 return false; 974 974 975 975 if ((access & VHOST_ACCESS_RO) && 976 - !access_ok(VERIFY_READ, (void __user *)a, size)) 976 + !access_ok((void __user *)a, size)) 977 977 return false; 978 978 if ((access & VHOST_ACCESS_WO) && 979 - !access_ok(VERIFY_WRITE, (void __user *)a, size)) 979 + !access_ok((void __user *)a, size)) 980 980 return false; 981 981 return true; 982 982 } ··· 1185 1185 { 1186 1186 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 1187 1187 1188 - return access_ok(VERIFY_READ, desc, num * sizeof *desc) && 1189 - access_ok(VERIFY_READ, avail, 1188 + return access_ok(desc, num * sizeof *desc) && 1189 + access_ok(avail, 1190 1190 sizeof *avail + num * sizeof *avail->ring + s) && 1191 - access_ok(VERIFY_WRITE, used, 1191 + access_ok(used, 1192 1192 sizeof *used + num * sizeof *used->ring + s); 1193 1193 } 1194 1194 ··· 1814 1814 goto err; 1815 1815 vq->signalled_used_valid = false; 1816 1816 if (!vq->iotlb && 1817 - !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) { 1817 + !access_ok(&vq->used->idx, sizeof vq->used->idx)) { 1818 1818 r = -EFAULT; 1819 1819 goto err; 1820 1820 }
+2 -2
drivers/video/fbdev/amifb.c
··· 1855 1855 var->yspot = par->crsr.spot_y; 1856 1856 if (size > var->height * var->width) 1857 1857 return -ENAMETOOLONG; 1858 - if (!access_ok(VERIFY_WRITE, data, size)) 1858 + if (!access_ok(data, size)) 1859 1859 return -EFAULT; 1860 1860 delta = 1 << par->crsr.fmode; 1861 1861 lspr = lofsprite + (delta << 1); ··· 1935 1935 return -EINVAL; 1936 1936 if (!var->height) 1937 1937 return -EINVAL; 1938 - if (!access_ok(VERIFY_READ, data, var->width * var->height)) 1938 + if (!access_ok(data, var->width * var->height)) 1939 1939 return -EFAULT; 1940 1940 delta = 1 << fmode; 1941 1941 lofsprite = shfsprite = (u_short *)spritememory;
+1 -1
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
··· 493 493 if (!display || !display->driver->memory_read) 494 494 return -ENOENT; 495 495 496 - if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) 496 + if (!access_ok(mr->buffer, mr->buffer_size)) 497 497 return -EFAULT; 498 498 499 499 if (mr->w > 4096 || mr->h > 4096)
+3 -3
drivers/xen/privcmd.c
··· 459 459 return -EFAULT; 460 460 /* Returns per-frame error in m.arr. */ 461 461 m.err = NULL; 462 - if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr))) 462 + if (!access_ok(m.arr, m.num * sizeof(*m.arr))) 463 463 return -EFAULT; 464 464 break; 465 465 case 2: 466 466 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2))) 467 467 return -EFAULT; 468 468 /* Returns per-frame error code in m.err. */ 469 - if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err)))) 469 + if (!access_ok(m.err, m.num * (sizeof(*m.err)))) 470 470 return -EFAULT; 471 471 break; 472 472 default: ··· 661 661 goto out; 662 662 } 663 663 664 - if (!access_ok(VERIFY_WRITE, kbufs[i].uptr, 664 + if (!access_ok(kbufs[i].uptr, 665 665 kbufs[i].size)) { 666 666 rc = -EFAULT; 667 667 goto out;
+2 -2
fs/binfmt_aout.c
··· 78 78 79 79 /* make sure we actually have a data and stack area to dump */ 80 80 set_fs(USER_DS); 81 - if (!access_ok(VERIFY_READ, START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) 81 + if (!access_ok(START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) 82 82 dump.u_dsize = 0; 83 - if (!access_ok(VERIFY_READ, START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) 83 + if (!access_ok(START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) 84 84 dump.u_ssize = 0; 85 85 86 86 set_fs(KERNEL_DS);
+1 -1
fs/btrfs/send.c
··· 6646 6646 goto out; 6647 6647 } 6648 6648 6649 - if (!access_ok(VERIFY_READ, arg->clone_sources, 6649 + if (!access_ok(arg->clone_sources, 6650 6650 sizeof(*arg->clone_sources) * 6651 6651 arg->clone_sources_count)) { 6652 6652 ret = -EFAULT;
+1 -1
fs/eventpoll.c
··· 2172 2172 return -EINVAL; 2173 2173 2174 2174 /* Verify that the area passed by the user is writeable */ 2175 - if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) 2175 + if (!access_ok(events, maxevents * sizeof(struct epoll_event))) 2176 2176 return -EFAULT; 2177 2177 2178 2178 /* Get the "struct file *" for the eventpoll file */
+2 -2
fs/fat/dir.c
··· 805 805 return fat_generic_ioctl(filp, cmd, arg); 806 806 } 807 807 808 - if (!access_ok(VERIFY_WRITE, d1, sizeof(struct __fat_dirent[2]))) 808 + if (!access_ok(d1, sizeof(struct __fat_dirent[2]))) 809 809 return -EFAULT; 810 810 /* 811 811 * Yes, we don't need this put_user() absolutely. However old ··· 845 845 return fat_generic_ioctl(filp, cmd, (unsigned long)arg); 846 846 } 847 847 848 - if (!access_ok(VERIFY_WRITE, d1, sizeof(struct compat_dirent[2]))) 848 + if (!access_ok(d1, sizeof(struct compat_dirent[2]))) 849 849 return -EFAULT; 850 850 /* 851 851 * Yes, we don't need this put_user() absolutely. However old
+1 -1
fs/ioctl.c
··· 203 203 fieinfo.fi_extents_start = ufiemap->fm_extents; 204 204 205 205 if (fiemap.fm_extent_count != 0 && 206 - !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start, 206 + !access_ok(fieinfo.fi_extents_start, 207 207 fieinfo.fi_extents_max * sizeof(struct fiemap_extent))) 208 208 return -EFAULT; 209 209
+1 -1
fs/namespace.c
··· 2651 2651 const char __user *f = from; 2652 2652 char c; 2653 2653 2654 - if (!access_ok(VERIFY_READ, from, n)) 2654 + if (!access_ok(from, n)) 2655 2655 return n; 2656 2656 2657 2657 current->kernel_uaccess_faults_ok++;
+2 -2
fs/ocfs2/dlmfs/dlmfs.c
··· 254 254 if (!count) 255 255 return 0; 256 256 257 - if (!access_ok(VERIFY_WRITE, buf, count)) 257 + if (!access_ok(buf, count)) 258 258 return -EFAULT; 259 259 260 260 /* don't read past the lvb */ ··· 302 302 if (!count) 303 303 return 0; 304 304 305 - if (!access_ok(VERIFY_READ, buf, count)) 305 + if (!access_ok(buf, count)) 306 306 return -EFAULT; 307 307 308 308 /* don't write past the lvb */
+1 -1
fs/pstore/pmsg.c
··· 33 33 record.size = count; 34 34 35 35 /* check outside lock, page in any data. write_user also checks */ 36 - if (!access_ok(VERIFY_READ, buf, count)) 36 + if (!access_ok(buf, count)) 37 37 return -EFAULT; 38 38 39 39 mutex_lock(&pmsg_lock);
+1 -1
fs/pstore/ram_core.c
··· 357 357 int rem, ret = 0, c = count; 358 358 size_t start; 359 359 360 - if (unlikely(!access_ok(VERIFY_READ, s, count))) 360 + if (unlikely(!access_ok(s, count))) 361 361 return -EFAULT; 362 362 if (unlikely(c > prz->buffer_size)) { 363 363 s += c - prz->buffer_size;
+5 -8
fs/read_write.c
··· 442 442 return -EBADF; 443 443 if (!(file->f_mode & FMODE_CAN_READ)) 444 444 return -EINVAL; 445 - if (unlikely(!access_ok(VERIFY_WRITE, buf, count))) 445 + if (unlikely(!access_ok(buf, count))) 446 446 return -EFAULT; 447 447 448 448 ret = rw_verify_area(READ, file, pos, count); ··· 538 538 return -EBADF; 539 539 if (!(file->f_mode & FMODE_CAN_WRITE)) 540 540 return -EINVAL; 541 - if (unlikely(!access_ok(VERIFY_READ, buf, count))) 541 + if (unlikely(!access_ok(buf, count))) 542 542 return -EFAULT; 543 543 544 544 ret = rw_verify_area(WRITE, file, pos, count); ··· 718 718 return ret; 719 719 } 720 720 721 - /* A write operation does a read from user space and vice versa */ 722 - #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ) 723 - 724 721 /** 725 722 * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace 726 723 * into the kernel and check that it is valid. ··· 807 810 goto out; 808 811 } 809 812 if (type >= 0 810 - && unlikely(!access_ok(vrfy_dir(type), buf, len))) { 813 + && unlikely(!access_ok(buf, len))) { 811 814 ret = -EFAULT; 812 815 goto out; 813 816 } ··· 853 856 *ret_pointer = iov; 854 857 855 858 ret = -EFAULT; 856 - if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) 859 + if (!access_ok(uvector, nr_segs*sizeof(*uvector))) 857 860 goto out; 858 861 859 862 /* ··· 878 881 if (len < 0) /* size_t not fitting in compat_ssize_t .. */ 879 882 goto out; 880 883 if (type >= 0 && 881 - !access_ok(vrfy_dir(type), compat_ptr(buf), len)) { 884 + !access_ok(compat_ptr(buf), len)) { 882 885 ret = -EFAULT; 883 886 goto out; 884 887 }
+5 -5
fs/readdir.c
··· 105 105 } 106 106 buf->result++; 107 107 dirent = buf->dirent; 108 - if (!access_ok(VERIFY_WRITE, dirent, 108 + if (!access_ok(dirent, 109 109 (unsigned long)(dirent->d_name + namlen + 1) - 110 110 (unsigned long)dirent)) 111 111 goto efault; ··· 221 221 }; 222 222 int error; 223 223 224 - if (!access_ok(VERIFY_WRITE, dirent, count)) 224 + if (!access_ok(dirent, count)) 225 225 return -EFAULT; 226 226 227 227 f = fdget_pos(fd); ··· 304 304 }; 305 305 int error; 306 306 307 - if (!access_ok(VERIFY_WRITE, dirent, count)) 307 + if (!access_ok(dirent, count)) 308 308 return -EFAULT; 309 309 310 310 f = fdget_pos(fd); ··· 365 365 } 366 366 buf->result++; 367 367 dirent = buf->dirent; 368 - if (!access_ok(VERIFY_WRITE, dirent, 368 + if (!access_ok(dirent, 369 369 (unsigned long)(dirent->d_name + namlen + 1) - 370 370 (unsigned long)dirent)) 371 371 goto efault; ··· 475 475 }; 476 476 int error; 477 477 478 - if (!access_ok(VERIFY_WRITE, dirent, count)) 478 + if (!access_ok(dirent, count)) 479 479 return -EFAULT; 480 480 481 481 f = fdget_pos(fd);
+4 -7
fs/select.c
··· 381 381 #define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long)) 382 382 383 383 /* 384 - * We do a VERIFY_WRITE here even though we are only reading this time: 385 - * we'll write to it eventually.. 386 - * 387 384 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned. 388 385 */ 389 386 static inline ··· 779 782 sigset_t __user *up = NULL; 780 783 781 784 if (sig) { 782 - if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t)) 785 + if (!access_ok(sig, sizeof(void *)+sizeof(size_t)) 783 786 || __get_user(up, (sigset_t __user * __user *)sig) 784 787 || __get_user(sigsetsize, 785 788 (size_t __user *)(sig+sizeof(void *)))) ··· 799 802 sigset_t __user *up = NULL; 800 803 801 804 if (sig) { 802 - if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t)) 805 + if (!access_ok(sig, sizeof(void *)+sizeof(size_t)) 803 806 || __get_user(up, (sigset_t __user * __user *)sig) 804 807 || __get_user(sigsetsize, 805 808 (size_t __user *)(sig+sizeof(void *)))) ··· 1365 1368 compat_uptr_t up = 0; 1366 1369 1367 1370 if (sig) { 1368 - if (!access_ok(VERIFY_READ, sig, 1371 + if (!access_ok(sig, 1369 1372 sizeof(compat_uptr_t)+sizeof(compat_size_t)) || 1370 1373 __get_user(up, (compat_uptr_t __user *)sig) || 1371 1374 __get_user(sigsetsize, ··· 1387 1390 compat_uptr_t up = 0; 1388 1391 1389 1392 if (sig) { 1390 - if (!access_ok(VERIFY_READ, sig, 1393 + if (!access_ok(sig, 1391 1394 sizeof(compat_uptr_t)+sizeof(compat_size_t)) || 1392 1395 __get_user(up, (compat_uptr_t __user *)sig) || 1393 1396 __get_user(sigsetsize,
+6 -6
include/asm-generic/uaccess.h
··· 35 35 #define segment_eq(a, b) ((a).seg == (b).seg) 36 36 #endif 37 37 38 - #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) 38 + #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) 39 39 40 40 /* 41 41 * The architecture should really override this if possible, at least ··· 78 78 ({ \ 79 79 void __user *__p = (ptr); \ 80 80 might_fault(); \ 81 - access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \ 81 + access_ok(__p, sizeof(*ptr)) ? \ 82 82 __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \ 83 83 -EFAULT; \ 84 84 }) ··· 140 140 ({ \ 141 141 const void __user *__p = (ptr); \ 142 142 might_fault(); \ 143 - access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ 143 + access_ok(__p, sizeof(*ptr)) ? \ 144 144 __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ 145 145 ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ 146 146 }) ··· 175 175 static inline long 176 176 strncpy_from_user(char *dst, const char __user *src, long count) 177 177 { 178 - if (!access_ok(VERIFY_READ, src, 1)) 178 + if (!access_ok(src, 1)) 179 179 return -EFAULT; 180 180 return __strncpy_from_user(dst, src, count); 181 181 } ··· 196 196 */ 197 197 static inline long strnlen_user(const char __user *src, long n) 198 198 { 199 - if (!access_ok(VERIFY_READ, src, 1)) 199 + if (!access_ok(src, 1)) 200 200 return 0; 201 201 return __strnlen_user(src, n); 202 202 } ··· 217 217 clear_user(void __user *to, unsigned long n) 218 218 { 219 219 might_fault(); 220 - if (!access_ok(VERIFY_WRITE, to, n)) 220 + if (!access_ok(to, n)) 221 221 return n; 222 222 223 223 return __clear_user(to, n);
+2 -2
include/linux/regset.h
··· 376 376 if (!regset->get) 377 377 return -EOPNOTSUPP; 378 378 379 - if (!access_ok(VERIFY_WRITE, data, size)) 379 + if (!access_ok(data, size)) 380 380 return -EFAULT; 381 381 382 382 return regset->get(target, regset, offset, size, NULL, data); ··· 402 402 if (!regset->set) 403 403 return -EOPNOTSUPP; 404 404 405 - if (!access_ok(VERIFY_READ, data, size)) 405 + if (!access_ok(data, size)) 406 406 return -EFAULT; 407 407 408 408 return regset->set(target, regset, offset, size, NULL, data);
+3 -6
include/linux/uaccess.h
··· 6 6 #include <linux/thread_info.h> 7 7 #include <linux/kasan-checks.h> 8 8 9 - #define VERIFY_READ 0 10 - #define VERIFY_WRITE 1 11 - 12 9 #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) 13 10 14 11 #include <asm/uaccess.h> ··· 108 111 { 109 112 unsigned long res = n; 110 113 might_fault(); 111 - if (likely(access_ok(VERIFY_READ, from, n))) { 114 + if (likely(access_ok(from, n))) { 112 115 kasan_check_write(to, n); 113 116 res = raw_copy_from_user(to, from, n); 114 117 } ··· 126 129 _copy_to_user(void __user *to, const void *from, unsigned long n) 127 130 { 128 131 might_fault(); 129 - if (access_ok(VERIFY_WRITE, to, n)) { 132 + if (access_ok(to, n)) { 130 133 kasan_check_read(from, n); 131 134 n = raw_copy_to_user(to, from, n); 132 135 } ··· 157 160 copy_in_user(void __user *to, const void __user *from, unsigned long n) 158 161 { 159 162 might_fault(); 160 - if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) 163 + if (access_ok(to, n) && access_ok(from, n)) 161 164 n = raw_copy_in_user(to, from, n); 162 165 return n; 163 166 }
+2 -2
include/net/checksum.h
··· 30 30 __wsum csum_and_copy_from_user (const void __user *src, void *dst, 31 31 int len, __wsum sum, int *err_ptr) 32 32 { 33 - if (access_ok(VERIFY_READ, src, len)) 33 + if (access_ok(src, len)) 34 34 return csum_partial_copy_from_user(src, dst, len, sum, err_ptr); 35 35 36 36 if (len) ··· 46 46 { 47 47 sum = csum_partial(src, len, sum); 48 48 49 - if (access_ok(VERIFY_WRITE, dst, len)) { 49 + if (access_ok(dst, len)) { 50 50 if (copy_to_user(dst, src, len) == 0) 51 51 return sum; 52 52 }
+1 -1
kernel/bpf/syscall.c
··· 79 79 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 80 80 return -E2BIG; 81 81 82 - if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size))) 82 + if (unlikely(!access_ok(uaddr, actual_size))) 83 83 return -EFAULT; 84 84 85 85 if (actual_size <= expected_size)
+8 -8
kernel/compat.c
··· 95 95 96 96 static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv) 97 97 { 98 - return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || 98 + return (!access_ok(ctv, sizeof(*ctv)) || 99 99 __get_user(tv->tv_sec, &ctv->tv_sec) || 100 100 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; 101 101 } 102 102 103 103 static int __compat_put_timeval(const struct timeval *tv, struct old_timeval32 __user *ctv) 104 104 { 105 - return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || 105 + return (!access_ok(ctv, sizeof(*ctv)) || 106 106 __put_user(tv->tv_sec, &ctv->tv_sec) || 107 107 __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; 108 108 } 109 109 110 110 static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts) 111 111 { 112 - return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || 112 + return (!access_ok(cts, sizeof(*cts)) || 113 113 __get_user(ts->tv_sec, &cts->tv_sec) || 114 114 __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; 115 115 } 116 116 117 117 static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts) 118 118 { 119 - return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || 119 + return (!access_ok(cts, sizeof(*cts)) || 120 120 __put_user(ts->tv_sec, &cts->tv_sec) || 121 121 __put_user(ts->tv_nsec, &cts->tv_nsec)) ? 
-EFAULT : 0; 122 122 } ··· 335 335 const struct compat_sigevent __user *u_event) 336 336 { 337 337 memset(event, 0, sizeof(*event)); 338 - return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) || 338 + return (!access_ok(u_event, sizeof(*u_event)) || 339 339 __get_user(event->sigev_value.sival_int, 340 340 &u_event->sigev_value.sival_int) || 341 341 __get_user(event->sigev_signo, &u_event->sigev_signo) || ··· 354 354 bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); 355 355 nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); 356 356 357 - if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) 357 + if (!access_ok(umask, bitmap_size / 8)) 358 358 return -EFAULT; 359 359 360 360 user_access_begin(); ··· 384 384 bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); 385 385 nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); 386 386 387 - if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) 387 + if (!access_ok(umask, bitmap_size / 8)) 388 388 return -EFAULT; 389 389 390 390 user_access_begin(); ··· 438 438 439 439 ptr = arch_compat_alloc_user_space(len); 440 440 441 - if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) 441 + if (unlikely(!access_ok(ptr, len))) 442 442 return NULL; 443 443 444 444 return ptr;
+1 -1
kernel/events/core.c
··· 10135 10135 u32 size; 10136 10136 int ret; 10137 10137 10138 - if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 10138 + if (!access_ok(uattr, PERF_ATTR_SIZE_VER0)) 10139 10139 return -EFAULT; 10140 10140 10141 10141 /*
+2 -2
kernel/exit.c
··· 1604 1604 if (!infop) 1605 1605 return err; 1606 1606 1607 - if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) 1607 + if (!access_ok(infop, sizeof(*infop))) 1608 1608 return -EFAULT; 1609 1609 1610 1610 user_access_begin(); ··· 1732 1732 if (!infop) 1733 1733 return err; 1734 1734 1735 - if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) 1735 + if (!access_ok(infop, sizeof(*infop))) 1736 1736 return -EFAULT; 1737 1737 1738 1738 user_access_begin();
+20 -15
kernel/futex.c
··· 481 481 } 482 482 } 483 483 484 + enum futex_access { 485 + FUTEX_READ, 486 + FUTEX_WRITE 487 + }; 488 + 484 489 /** 485 490 * get_futex_key() - Get parameters which are the keys for a futex 486 491 * @uaddr: virtual address of the futex 487 492 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED 488 493 * @key: address where result is stored. 489 - * @rw: mapping needs to be read/write (values: VERIFY_READ, 490 - * VERIFY_WRITE) 494 + * @rw: mapping needs to be read/write (values: FUTEX_READ, 495 + * FUTEX_WRITE) 491 496 * 492 497 * Return: a negative error code or 0 493 498 * ··· 505 500 * lock_page() might sleep, the caller should not hold a spinlock. 506 501 */ 507 502 static int 508 - get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) 503 + get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw) 509 504 { 510 505 unsigned long address = (unsigned long)uaddr; 511 506 struct mm_struct *mm = current->mm; ··· 521 516 return -EINVAL; 522 517 address -= key->both.offset; 523 518 524 - if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) 519 + if (unlikely(!access_ok(uaddr, sizeof(u32)))) 525 520 return -EFAULT; 526 521 527 522 if (unlikely(should_fail_futex(fshared))) ··· 551 546 * If write access is not required (eg. FUTEX_WAIT), try 552 547 * and get read-only access. 
553 548 */ 554 - if (err == -EFAULT && rw == VERIFY_READ) { 549 + if (err == -EFAULT && rw == FUTEX_READ) { 555 550 err = get_user_pages_fast(address, 1, 0, &page); 556 551 ro = 1; 557 552 } ··· 1588 1583 if (!bitset) 1589 1584 return -EINVAL; 1590 1585 1591 - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); 1586 + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ); 1592 1587 if (unlikely(ret != 0)) 1593 1588 goto out; 1594 1589 ··· 1647 1642 oparg = 1 << oparg; 1648 1643 } 1649 1644 1650 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 1645 + if (!access_ok(uaddr, sizeof(u32))) 1651 1646 return -EFAULT; 1652 1647 1653 1648 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); ··· 1687 1682 DEFINE_WAKE_Q(wake_q); 1688 1683 1689 1684 retry: 1690 - ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); 1685 + ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); 1691 1686 if (unlikely(ret != 0)) 1692 1687 goto out; 1693 - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); 1688 + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); 1694 1689 if (unlikely(ret != 0)) 1695 1690 goto out_put_key1; 1696 1691 ··· 1966 1961 } 1967 1962 1968 1963 retry: 1969 - ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); 1964 + ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); 1970 1965 if (unlikely(ret != 0)) 1971 1966 goto out; 1972 1967 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, 1973 - requeue_pi ? VERIFY_WRITE : VERIFY_READ); 1968 + requeue_pi ? FUTEX_WRITE : FUTEX_READ); 1974 1969 if (unlikely(ret != 0)) 1975 1970 goto out_put_key1; 1976 1971 ··· 2639 2634 * while the syscall executes. 
2640 2635 */ 2641 2636 retry: 2642 - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ); 2637 + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); 2643 2638 if (unlikely(ret != 0)) 2644 2639 return ret; 2645 2640 ··· 2798 2793 } 2799 2794 2800 2795 retry: 2801 - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE); 2796 + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); 2802 2797 if (unlikely(ret != 0)) 2803 2798 goto out; 2804 2799 ··· 2977 2972 if ((uval & FUTEX_TID_MASK) != vpid) 2978 2973 return -EPERM; 2979 2974 2980 - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); 2975 + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE); 2981 2976 if (ret) 2982 2977 return ret; 2983 2978 ··· 3204 3199 */ 3205 3200 rt_mutex_init_waiter(&rt_waiter); 3206 3201 3207 - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); 3202 + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); 3208 3203 if (unlikely(ret != 0)) 3209 3204 goto out; 3210 3205
+2 -2
kernel/printk/printk.c
··· 1466 1466 return -EINVAL; 1467 1467 if (!len) 1468 1468 return 0; 1469 - if (!access_ok(VERIFY_WRITE, buf, len)) 1469 + if (!access_ok(buf, len)) 1470 1470 return -EFAULT; 1471 1471 error = wait_event_interruptible(log_wait, 1472 1472 syslog_seq != log_next_seq); ··· 1484 1484 return -EINVAL; 1485 1485 if (!len) 1486 1486 return 0; 1487 - if (!access_ok(VERIFY_WRITE, buf, len)) 1487 + if (!access_ok(buf, len)) 1488 1488 return -EFAULT; 1489 1489 error = syslog_print_all(buf, len, clear); 1490 1490 break;
+2 -2
kernel/ptrace.c
··· 1073 1073 struct iovec kiov; 1074 1074 struct iovec __user *uiov = datavp; 1075 1075 1076 - if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 1076 + if (!access_ok(uiov, sizeof(*uiov))) 1077 1077 return -EFAULT; 1078 1078 1079 1079 if (__get_user(kiov.iov_base, &uiov->iov_base) || ··· 1229 1229 compat_uptr_t ptr; 1230 1230 compat_size_t len; 1231 1231 1232 - if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 1232 + if (!access_ok(uiov, sizeof(*uiov))) 1233 1233 return -EFAULT; 1234 1234 1235 1235 if (__get_user(ptr, &uiov->iov_base) ||
+3 -3
kernel/rseq.c
··· 267 267 268 268 if (unlikely(t->flags & PF_EXITING)) 269 269 return; 270 - if (unlikely(!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))) 270 + if (unlikely(!access_ok(t->rseq, sizeof(*t->rseq)))) 271 271 goto error; 272 272 ret = rseq_ip_fixup(regs); 273 273 if (unlikely(ret < 0)) ··· 295 295 296 296 if (!t->rseq) 297 297 return; 298 - if (!access_ok(VERIFY_READ, t->rseq, sizeof(*t->rseq)) || 298 + if (!access_ok(t->rseq, sizeof(*t->rseq)) || 299 299 rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs)) 300 300 force_sig(SIGSEGV, t); 301 301 } ··· 351 351 if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) || 352 352 rseq_len != sizeof(*rseq)) 353 353 return -EINVAL; 354 - if (!access_ok(VERIFY_WRITE, rseq, rseq_len)) 354 + if (!access_ok(rseq, rseq_len)) 355 355 return -EFAULT; 356 356 current->rseq = rseq; 357 357 current->rseq_len = rseq_len;
+2 -2
kernel/sched/core.c
··· 4450 4450 u32 size; 4451 4451 int ret; 4452 4452 4453 - if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 4453 + if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0)) 4454 4454 return -EFAULT; 4455 4455 4456 4456 /* Zero the full structure, so that a short copy will be nice: */ ··· 4650 4650 { 4651 4651 int ret; 4652 4652 4653 - if (!access_ok(VERIFY_WRITE, uattr, usize)) 4653 + if (!access_ok(uattr, usize)) 4654 4654 return -EFAULT; 4655 4655 4656 4656 /*
+4 -4
kernel/signal.c
··· 3997 3997 3998 3998 if (act) { 3999 3999 old_sigset_t mask; 4000 - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 4000 + if (!access_ok(act, sizeof(*act)) || 4001 4001 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 4002 4002 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 4003 4003 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || ··· 4012 4012 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4013 4013 4014 4014 if (!ret && oact) { 4015 - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 4015 + if (!access_ok(oact, sizeof(*oact)) || 4016 4016 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 4017 4017 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 4018 4018 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || ··· 4034 4034 compat_uptr_t handler, restorer; 4035 4035 4036 4036 if (act) { 4037 - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 4037 + if (!access_ok(act, sizeof(*act)) || 4038 4038 __get_user(handler, &act->sa_handler) || 4039 4039 __get_user(restorer, &act->sa_restorer) || 4040 4040 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || ··· 4052 4052 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4053 4053 4054 4054 if (!ret && oact) { 4055 - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 4055 + if (!access_ok(oact, sizeof(*oact)) || 4056 4056 __put_user(ptr_to_compat(old_ka.sa.sa_handler), 4057 4057 &oact->sa_handler) || 4058 4058 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
+1 -1
kernel/sys.c
··· 2627 2627 s.freehigh >>= bitcount; 2628 2628 } 2629 2629 2630 - if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) || 2630 + if (!access_ok(info, sizeof(struct compat_sysinfo)) || 2631 2631 __put_user(s.uptime, &info->uptime) || 2632 2632 __put_user(s.loads[0], &info->loads[0]) || 2633 2633 __put_user(s.loads[1], &info->loads[1]) ||
+1 -1
kernel/trace/bpf_trace.c
··· 170 170 return -EPERM; 171 171 if (unlikely(uaccess_kernel())) 172 172 return -EPERM; 173 - if (!access_ok(VERIFY_WRITE, unsafe_ptr, size)) 173 + if (!access_ok(unsafe_ptr, size)) 174 174 return -EPERM; 175 175 176 176 return probe_kernel_write(unsafe_ptr, src, size);
+2 -2
lib/bitmap.c
··· 443 443 unsigned int ulen, unsigned long *maskp, 444 444 int nmaskbits) 445 445 { 446 - if (!access_ok(VERIFY_READ, ubuf, ulen)) 446 + if (!access_ok(ubuf, ulen)) 447 447 return -EFAULT; 448 448 return __bitmap_parse((const char __force *)ubuf, 449 449 ulen, 1, maskp, nmaskbits); ··· 641 641 unsigned int ulen, unsigned long *maskp, 642 642 int nmaskbits) 643 643 { 644 - if (!access_ok(VERIFY_READ, ubuf, ulen)) 644 + if (!access_ok(ubuf, ulen)) 645 645 return -EFAULT; 646 646 return __bitmap_parselist((const char __force *)ubuf, 647 647 ulen, 1, maskp, nmaskbits);
+4 -4
lib/iov_iter.c
··· 136 136 137 137 static int copyout(void __user *to, const void *from, size_t n) 138 138 { 139 - if (access_ok(VERIFY_WRITE, to, n)) { 139 + if (access_ok(to, n)) { 140 140 kasan_check_read(from, n); 141 141 n = raw_copy_to_user(to, from, n); 142 142 } ··· 145 145 146 146 static int copyin(void *to, const void __user *from, size_t n) 147 147 { 148 - if (access_ok(VERIFY_READ, from, n)) { 148 + if (access_ok(from, n)) { 149 149 kasan_check_write(to, n); 150 150 n = raw_copy_from_user(to, from, n); 151 151 } ··· 614 614 #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE 615 615 static int copyout_mcsafe(void __user *to, const void *from, size_t n) 616 616 { 617 - if (access_ok(VERIFY_WRITE, to, n)) { 617 + if (access_ok(to, n)) { 618 618 kasan_check_read(from, n); 619 619 n = copy_to_user_mcsafe((__force void *) to, from, n); 620 620 } ··· 1663 1663 { 1664 1664 if (len > MAX_RW_COUNT) 1665 1665 len = MAX_RW_COUNT; 1666 - if (unlikely(!access_ok(!rw, buf, len))) 1666 + if (unlikely(!access_ok(buf, len))) 1667 1667 return -EFAULT; 1668 1668 1669 1669 iov->iov_base = buf;
+2 -2
lib/usercopy.c
··· 8 8 { 9 9 unsigned long res = n; 10 10 might_fault(); 11 - if (likely(access_ok(VERIFY_READ, from, n))) { 11 + if (likely(access_ok(from, n))) { 12 12 kasan_check_write(to, n); 13 13 res = raw_copy_from_user(to, from, n); 14 14 } ··· 23 23 unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) 24 24 { 25 25 might_fault(); 26 - if (likely(access_ok(VERIFY_WRITE, to, n))) { 26 + if (likely(access_ok(to, n))) { 27 27 kasan_check_read(from, n); 28 28 n = raw_copy_to_user(to, from, n); 29 29 }
+2 -4
mm/gup.c
··· 1813 1813 len = (unsigned long) nr_pages << PAGE_SHIFT; 1814 1814 end = start + len; 1815 1815 1816 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, 1817 - (void __user *)start, len))) 1816 + if (unlikely(!access_ok((void __user *)start, len))) 1818 1817 return 0; 1819 1818 1820 1819 /* ··· 1867 1868 if (nr_pages <= 0) 1868 1869 return 0; 1869 1870 1870 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, 1871 - (void __user *)start, len))) 1871 + if (unlikely(!access_ok((void __user *)start, len))) 1872 1872 return -EFAULT; 1873 1873 1874 1874 if (gup_fast_permitted(start, nr_pages, write)) {
+2 -2
mm/mincore.c
··· 233 233 return -EINVAL; 234 234 235 235 /* ..and we need to be passed a valid user-space range */ 236 - if (!access_ok(VERIFY_READ, (void __user *) start, len)) 236 + if (!access_ok((void __user *) start, len)) 237 237 return -ENOMEM; 238 238 239 239 /* This also avoids any overflows on PAGE_ALIGN */ 240 240 pages = len >> PAGE_SHIFT; 241 241 pages += (offset_in_page(len)) != 0; 242 242 243 - if (!access_ok(VERIFY_WRITE, vec, pages)) 243 + if (!access_ok(vec, pages)) 244 244 return -EFAULT; 245 245 246 246 tmp = (void *) __get_free_page(GFP_USER);
+1 -1
net/batman-adv/icmp_socket.c
··· 147 147 if (!buf || count < sizeof(struct batadv_icmp_packet)) 148 148 return -EINVAL; 149 149 150 - if (!access_ok(VERIFY_WRITE, buf, count)) 150 + if (!access_ok(buf, count)) 151 151 return -EFAULT; 152 152 153 153 error = wait_event_interruptible(socket_client->queue_wait,
+1 -1
net/batman-adv/log.c
··· 136 136 if (count == 0) 137 137 return 0; 138 138 139 - if (!access_ok(VERIFY_WRITE, buf, count)) 139 + if (!access_ok(buf, count)) 140 140 return -EFAULT; 141 141 142 142 error = wait_event_interruptible(debug_log->queue_wait,
+15 -15
net/compat.c
··· 358 358 359 359 if (optlen < sizeof(*up)) 360 360 return -EINVAL; 361 - if (!access_ok(VERIFY_READ, up, sizeof(*up)) || 361 + if (!access_ok(up, sizeof(*up)) || 362 362 __get_user(ktime.tv_sec, &up->tv_sec) || 363 363 __get_user(ktime.tv_usec, &up->tv_usec)) 364 364 return -EFAULT; ··· 438 438 439 439 if (!err) { 440 440 if (put_user(sizeof(*up), optlen) || 441 - !access_ok(VERIFY_WRITE, up, sizeof(*up)) || 441 + !access_ok(up, sizeof(*up)) || 442 442 __put_user(ktime.tv_sec, &up->tv_sec) || 443 443 __put_user(ktime.tv_usec, &up->tv_usec)) 444 444 err = -EFAULT; ··· 590 590 compat_alloc_user_space(sizeof(struct group_req)); 591 591 u32 interface; 592 592 593 - if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) || 594 - !access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) || 593 + if (!access_ok(gr32, sizeof(*gr32)) || 594 + !access_ok(kgr, sizeof(struct group_req)) || 595 595 __get_user(interface, &gr32->gr_interface) || 596 596 __put_user(interface, &kgr->gr_interface) || 597 597 copy_in_user(&kgr->gr_group, &gr32->gr_group, ··· 611 611 sizeof(struct group_source_req)); 612 612 u32 interface; 613 613 614 - if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) || 615 - !access_ok(VERIFY_WRITE, kgsr, 614 + if (!access_ok(gsr32, sizeof(*gsr32)) || 615 + !access_ok(kgsr, 616 616 sizeof(struct group_source_req)) || 617 617 __get_user(interface, &gsr32->gsr_interface) || 618 618 __put_user(interface, &kgsr->gsr_interface) || ··· 631 631 struct group_filter __user *kgf; 632 632 u32 interface, fmode, numsrc; 633 633 634 - if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) || 634 + if (!access_ok(gf32, __COMPAT_GF0_SIZE) || 635 635 __get_user(interface, &gf32->gf_interface) || 636 636 __get_user(fmode, &gf32->gf_fmode) || 637 637 __get_user(numsrc, &gf32->gf_numsrc)) ··· 641 641 if (koptlen < GROUP_FILTER_SIZE(numsrc)) 642 642 return -EINVAL; 643 643 kgf = compat_alloc_user_space(koptlen); 644 - if (!access_ok(VERIFY_WRITE, kgf, koptlen) || 644 + if (!access_ok(kgf, 
koptlen) || 645 645 __put_user(interface, &kgf->gf_interface) || 646 646 __put_user(fmode, &kgf->gf_fmode) || 647 647 __put_user(numsrc, &kgf->gf_numsrc) || ··· 675 675 return getsockopt(sock, level, optname, optval, optlen); 676 676 677 677 koptlen = compat_alloc_user_space(sizeof(*koptlen)); 678 - if (!access_ok(VERIFY_READ, optlen, sizeof(*optlen)) || 678 + if (!access_ok(optlen, sizeof(*optlen)) || 679 679 __get_user(ulen, optlen)) 680 680 return -EFAULT; 681 681 ··· 685 685 if (klen < GROUP_FILTER_SIZE(0)) 686 686 return -EINVAL; 687 687 688 - if (!access_ok(VERIFY_WRITE, koptlen, sizeof(*koptlen)) || 688 + if (!access_ok(koptlen, sizeof(*koptlen)) || 689 689 __put_user(klen, koptlen)) 690 690 return -EFAULT; 691 691 692 692 /* have to allow space for previous compat_alloc_user_space, too */ 693 693 kgf = compat_alloc_user_space(klen+sizeof(*optlen)); 694 694 695 - if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) || 695 + if (!access_ok(gf32, __COMPAT_GF0_SIZE) || 696 696 __get_user(interface, &gf32->gf_interface) || 697 697 __get_user(fmode, &gf32->gf_fmode) || 698 698 __get_user(numsrc, &gf32->gf_numsrc) || ··· 706 706 if (err) 707 707 return err; 708 708 709 - if (!access_ok(VERIFY_READ, koptlen, sizeof(*koptlen)) || 709 + if (!access_ok(koptlen, sizeof(*koptlen)) || 710 710 __get_user(klen, koptlen)) 711 711 return -EFAULT; 712 712 713 713 ulen = klen - (sizeof(*kgf)-sizeof(*gf32)); 714 714 715 - if (!access_ok(VERIFY_WRITE, optlen, sizeof(*optlen)) || 715 + if (!access_ok(optlen, sizeof(*optlen)) || 716 716 __put_user(ulen, optlen)) 717 717 return -EFAULT; 718 718 719 - if (!access_ok(VERIFY_READ, kgf, klen) || 720 - !access_ok(VERIFY_WRITE, gf32, ulen) || 719 + if (!access_ok(kgf, klen) || 720 + !access_ok(gf32, ulen) || 721 721 __get_user(interface, &kgf->gf_interface) || 722 722 __get_user(fmode, &kgf->gf_fmode) || 723 723 __get_user(numsrc, &kgf->gf_numsrc) ||
+1 -1
net/sunrpc/sysctl.c
··· 89 89 left = *lenp; 90 90 91 91 if (write) { 92 - if (!access_ok(VERIFY_READ, buffer, left)) 92 + if (!access_ok(buffer, left)) 93 93 return -EFAULT; 94 94 p = buffer; 95 95 while (left && __get_user(c, p) >= 0 && isspace(c))
+1 -1
security/tomoyo/common.c
··· 2591 2591 int idx; 2592 2592 if (!head->write) 2593 2593 return -ENOSYS; 2594 - if (!access_ok(VERIFY_READ, buffer, buffer_len)) 2594 + if (!access_ok(buffer, buffer_len)) 2595 2595 return -EFAULT; 2596 2596 if (mutex_lock_interruptible(&head->io_sem)) 2597 2597 return -EINTR;
+1 -1
sound/core/seq/seq_clientmgr.c
··· 393 393 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT)) 394 394 return -ENXIO; 395 395 396 - if (!access_ok(VERIFY_WRITE, buf, count)) 396 + if (!access_ok(buf, count)) 397 397 return -EFAULT; 398 398 399 399 /* check client structures are in place */
+2 -2
sound/isa/sb/emu8000_patch.c
··· 183 183 } 184 184 185 185 if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_8BITS) { 186 - if (!access_ok(VERIFY_READ, data, sp->v.size)) 186 + if (!access_ok(data, sp->v.size)) 187 187 return -EFAULT; 188 188 } else { 189 - if (!access_ok(VERIFY_READ, data, sp->v.size * 2)) 189 + if (!access_ok(data, sp->v.size * 2)) 190 190 return -EFAULT; 191 191 } 192 192
+1 -1
tools/perf/util/include/asm/uaccess.h
··· 10 10 11 11 #define get_user __get_user 12 12 13 - #define access_ok(type, addr, size) 1 13 + #define access_ok(addr, size) 1 14 14 15 15 #endif
+1 -2
virt/kvm/kvm_main.c
··· 939 939 /* We can read the guest memory with __xxx_user() later on. */ 940 940 if ((id < KVM_USER_MEM_SLOTS) && 941 941 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 942 - !access_ok(VERIFY_WRITE, 943 - (void __user *)(unsigned long)mem->userspace_addr, 942 + !access_ok((void __user *)(unsigned long)mem->userspace_addr, 944 943 mem->memory_size))) 945 944 goto out; 946 945 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)